repo_name | path | copies | size | content | license
---|---|---|---|---|---|
keeeener/nicki | platform/external/webkit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py | 15 | 19900 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
from webkitpy.layout_tests.layout_package import test_results
from webkitpy.layout_tests.layout_package import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BuilderTest(unittest.TestCase):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
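# In the mock installed below, build N maps to revision N + 1000 and
# builds 0-3 are green, which is why the tests assert revisions such as
# 1003 and 1004.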
def _install_fetch_build(self, failure):
def _mock_fetch_build(build_number):
build = Build(
builder=self.builder,
build_number=build_number,
revision=build_number + 1000,
is_green=build_number < 4
)
results = [self._mock_test_result(testname) for testname in failure(build_number)]
build._layout_test_results = LayoutTestResults(results)
return build
self.builder._fetch_build = _mock_fetch_build
def setUp(self):
self.buildbot = BuildBot()
self.builder = Builder(u"Test Builder \u2661", self.buildbot)
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_find_regression_window(self):
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
self.assertEqual(regression_window.build_before_failure(), None)
self.assertEqual(regression_window.failing_build().revision(), 1008)
def test_none_build(self):
self.builder._fetch_build = lambda build_number: None
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure(), None)
self.assertEqual(regression_window.failing_build(), None)
def test_flaky_tests(self):
self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1009)
self.assertEqual(regression_window.failing_build().revision(), 1010)
def test_failure_and_flaky(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
def test_no_results(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1003)
self.assertEqual(regression_window.failing_build().revision(), 1004)
def test_failure_after_flaky(self):
self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"])
regression_window = self.builder.find_regression_window(self.builder.build(10))
self.assertEqual(regression_window.build_before_failure().revision(), 1006)
self.assertEqual(regression_window.failing_build().revision(), 1007)
def test_find_blameworthy_regression_window(self):
self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
self.assertEqual(self.builder.find_blameworthy_regression_window(10, look_back_limit=2), None)
# Flakey test avoidance requires at least 2 red builds:
self.assertEqual(self.builder.find_blameworthy_regression_window(4), None)
self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
# Green builder:
self.assertEqual(self.builder.find_blameworthy_regression_window(3), None)
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
def test_build_and_revision_for_filename(self):
expectations = {
"r47483 (1)/" : (47483, 1),
"r47483 (1).zip" : (47483, 1),
}
for filename, revision_and_build in expectations.items():
self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
class BuildTest(unittest.TestCase):
def test_layout_test_results(self):
build = Build(None, None, None, None)
build._fetch_results_html = lambda: None
# Test that layout_test_results() returns None if the fetch fails.
self.assertEqual(build.layout_test_results(), None)
class BuildBotTest(unittest.TestCase):
_example_one_box_status = '''
<table>
<tr>
<td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
<td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
<tr>
<td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
<td class="LastBuild box" >no build</td>
<td align="center" class="Activity building">building<br />< 1 min</td>
<tr>
<td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
<td align="center" class="Activity idle">idle<br />3 pending</td>
<tr>
<td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
<td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
<td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
</table>
'''
_expected_example_one_box_parsings = [
{
'is_green': True,
'build_number' : 3693,
'name': u'Windows Debug (Tests)',
'built_revision': 47380,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : None,
'name': u'SnowLeopard Intel Release',
'built_revision': None,
'activity': 'building',
'pending_builds': 0,
},
{
'is_green': False,
'build_number' : 654,
'name': u'Qt Linux Release',
'built_revision': 47383,
'activity': 'idle',
'pending_builds': 3,
},
{
'is_green': True,
'build_number' : 2090,
'name': u'Qt Windows 32-bit Debug',
'built_revision': 60563,
'activity': 'building',
'pending_builds': 0,
},
]
def test_status_parsing(self):
buildbot = BuildBot()
soup = BeautifulSoup(self._example_one_box_status)
status_table = soup.find("table")
input_rows = status_table.findAll('tr')
for x in range(len(input_rows)):
status_row = input_rows[x]
expected_parsing = self._expected_example_one_box_parsings[x]
builder = buildbot._parse_builder_status_from_row(status_row)
# Make sure we aren't parsing more or less than we expect
self.assertEquals(builder.keys(), expected_parsing.keys())
for key, expected_value in expected_parsing.items():
self.assertEquals(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
def test_core_builder_methods(self):
buildbot = BuildBot()
# Override builder_statuses function to not touch the network.
def example_builder_statuses(): # We could use instancemethod() to bind 'self' but we don't need to.
return BuildBotTest._expected_example_one_box_parsings
buildbot.builder_statuses = example_builder_statuses
buildbot.core_builder_names_regexps = [ 'Leopard', "Windows.*Build" ]
self.assertEquals(buildbot.red_core_builders_names(), [])
self.assertTrue(buildbot.core_builders_are_green())
buildbot.core_builder_names_regexps = [ 'SnowLeopard', 'Qt' ]
self.assertEquals(buildbot.red_core_builders_names(), [ u'SnowLeopard Intel Release', u'Qt Linux Release' ])
self.assertFalse(buildbot.core_builders_are_green())
def test_builder_name_regexps(self):
buildbot = BuildBot()
# For complete testing, this list should match the list of builders at build.webkit.org:
example_builders = [
{'name': u'Leopard Intel Release (Build)', },
{'name': u'Leopard Intel Release (Tests)', },
{'name': u'Leopard Intel Debug (Build)', },
{'name': u'Leopard Intel Debug (Tests)', },
{'name': u'SnowLeopard Intel Release (Build)', },
{'name': u'SnowLeopard Intel Release (Tests)', },
{'name': u'SnowLeopard Intel Release (WebKit2 Tests)', },
{'name': u'SnowLeopard Intel Leaks', },
{'name': u'Windows Release (Build)', },
{'name': u'Windows 7 Release (Tests)', },
{'name': u'Windows Debug (Build)', },
{'name': u'Windows XP Debug (Tests)', },
{'name': u'Windows 7 Release (WebKit2 Tests)', },
{'name': u'GTK Linux 32-bit Release', },
{'name': u'GTK Linux 32-bit Debug', },
{'name': u'GTK Linux 64-bit Debug', },
{'name': u'Qt Linux Release', },
{'name': u'Qt Linux Release minimal', },
{'name': u'Qt Linux ARMv7 Release', },
{'name': u'Qt Windows 32-bit Release', },
{'name': u'Qt Windows 32-bit Debug', },
{'name': u'Chromium Win Release', },
{'name': u'Chromium Mac Release', },
{'name': u'Chromium Linux Release', },
{'name': u'Chromium Win Release (Tests)', },
{'name': u'Chromium Mac Release (Tests)', },
{'name': u'Chromium Linux Release (Tests)', },
{'name': u'New run-webkit-tests', },
{'name': u'WinCairo Debug (Build)', },
{'name': u'WinCE Release (Build)', },
{'name': u'EFL Linux Release (Build)', },
]
name_regexps = [
"SnowLeopard.*Build",
"SnowLeopard.*\(Test",
"SnowLeopard.*\(WebKit2 Test",
"Leopard.*",
"Windows.*Build",
"Windows.*\(Test",
"WinCairo",
"WinCE",
"EFL",
"GTK.*32",
"GTK.*64.*Debug", # Disallow the 64-bit Release bot which is broken.
"Qt",
"Chromium.*Release$",
]
expected_builders = [
{'name': u'Leopard Intel Release (Build)', },
{'name': u'Leopard Intel Release (Tests)', },
{'name': u'Leopard Intel Debug (Build)', },
{'name': u'Leopard Intel Debug (Tests)', },
{'name': u'SnowLeopard Intel Release (Build)', },
{'name': u'SnowLeopard Intel Release (Tests)', },
{'name': u'SnowLeopard Intel Release (WebKit2 Tests)', },
{'name': u'Windows Release (Build)', },
{'name': u'Windows 7 Release (Tests)', },
{'name': u'Windows Debug (Build)', },
{'name': u'Windows XP Debug (Tests)', },
{'name': u'GTK Linux 32-bit Release', },
{'name': u'GTK Linux 32-bit Debug', },
{'name': u'GTK Linux 64-bit Debug', },
{'name': u'Qt Linux Release', },
{'name': u'Qt Linux Release minimal', },
{'name': u'Qt Linux ARMv7 Release', },
{'name': u'Qt Windows 32-bit Release', },
{'name': u'Qt Windows 32-bit Debug', },
{'name': u'Chromium Win Release', },
{'name': u'Chromium Mac Release', },
{'name': u'Chromium Linux Release', },
{'name': u'WinCairo Debug (Build)', },
{'name': u'WinCE Release (Build)', },
{'name': u'EFL Linux Release (Build)', },
]
# This test should probably be updated if the default regexp list changes
self.assertEquals(buildbot.core_builder_names_regexps, name_regexps)
builders = buildbot._builder_statuses_with_names_matching_regexps(example_builders, name_regexps)
self.assertEquals(builders, expected_builders)
def test_builder_with_name(self):
buildbot = BuildBot()
builder = buildbot.builder_with_name("Test Builder")
self.assertEqual(builder.name(), "Test Builder")
self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder")
self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder")
# Override _fetch_build_dictionary function to not touch the network.
def mock_fetch_build_dictionary(self, build_number):
build_dictionary = {
"sourceStamp": {
"revision" : 2 * build_number,
},
"number" : int(build_number),
"results" : build_number % 2, # 0 means pass
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
build = builder.build(10)
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
self.assertEqual(build.is_green(), True)
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
self.assertEqual(build.is_green(), False)
self.assertEqual(builder.build(None), None)
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
<table>
<tr class="alt">
<th>Filename</th>
<th>Size</th>
<th>Content type</th>
<th>Content encoding</th>
</tr>
<tr class="directory ">
<td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
<td><b></b></td>
<td><b>[Directory]</b></td>
<td><b></b></td>
</tr>
<tr class="file alt">
<td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
<td>89K</td>
<td>[application/zip]</td>
<td></td>
</tr>
'''
_expected_files = [
{
"filename" : "r47483 (1)/",
"size" : "",
"type" : "[Directory]",
"encoding" : "",
},
{
"filename" : "r47484 (2).zip",
"size" : "89K",
"type" : "[application/zip]",
"encoding" : "",
},
]
def test_parse_build_to_revision_map(self):
buildbot = BuildBot()
files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
self.assertEqual(self._expected_files, files)
# Revision, is_green
# Ordered from newest (highest number) to oldest.
fake_builder1 = [
[2, False],
[1, True],
]
fake_builder2 = [
[2, False],
[1, True],
]
fake_builders = [
fake_builder1,
fake_builder2,
]
def _build_from_fake(self, fake_builder, index):
if index >= len(fake_builder):
return None
fake_build = fake_builder[index]
build = Build(
builder=fake_builder,
build_number=index,
revision=fake_build[0],
is_green=fake_build[1],
)
def mock_previous_build():
return self._build_from_fake(fake_builder, index + 1)
build.previous_build = mock_previous_build
return build
def _fake_builds_at_index(self, index):
return [self._build_from_fake(builder, index) for builder in self.fake_builders]
def test_last_green_revision(self):
buildbot = BuildBot()
def mock_builds_from_builders(only_core_builders):
return self._fake_builds_at_index(0)
buildbot._latest_builds_from_builders = mock_builds_from_builders
self.assertEqual(buildbot.last_green_revision(), 1)
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
return "wrong build"
def _fetch_revision_to_build_map(self):
return {'r5': 5, 'r2': 2, 'r3': 3}
def test_latest_cached_build(self):
b = Builder('builder', BuildBot())
b._fetch_build = self._fetch_build
b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
self.assertEquals("correct build", b.latest_cached_build())
def results_url(self):
return "some-url"
def test_results_zip_url(self):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEquals("some-url.zip", b.results_zip_url())
def test_results(self):
builder = Builder('builder', BuildBot())
b = Build(builder, 123, 123, True)
self.assertTrue(b.results())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
henfredemars/Fork-Lang | llvm/test/CodeGen/SystemZ/Large/branch-range-09.py | 9 | 3483 | # Test 32-bit COMPARE LOGICAL AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 14 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches.
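# (The 0x34 is presumably the slack between the 0x10000-byte forward range
# of a short relative branch and the 0xffcc bytes of "main":
# 0x10000 - 0xffcc == 0x34.)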
#
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 1(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 2(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 3(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 4(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 5(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 6(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 7(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lb [[REG:%r[0-5]]], 25(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 26(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 27(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 28(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 29(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 30(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 31(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 32(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
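# A Fibonacci-style recurrence on (a, b) generates varied but deterministic
# store offsets and values. Offsets start at 4096, which presumably forces
# the long-displacement 6-byte MVIY form mentioned above -- hence the step
# of 6 when filling main_size bytes.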
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| apache-2.0 |
ol-loginov/intellij-community | plugins/hg4idea/testData/bin/hgext/bugzilla.py | 93 | 34937 | # bugzilla.py - bugzilla integration for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
# Copyright 2011-2 Jim Hague <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''hooks for integrating with the Bugzilla bug tracker
This hook extension adds comments on bugs in Bugzilla when changesets
that refer to bugs by Bugzilla ID are seen. The comment is formatted using
the Mercurial template mechanism.
The bug references can optionally include an update for Bugzilla of the
hours spent working on the bug. Bugs can also be marked fixed.
Three basic modes of access to Bugzilla are provided:
1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
2. Check data via the Bugzilla XMLRPC interface and submit bug change
via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
3. Writing directly to the Bugzilla database. Only Bugzilla installations
using MySQL are supported. Requires Python MySQLdb.
Writing directly to the database is susceptible to schema changes, and
relies on a Bugzilla contrib script to send out bug change
notification emails. This script runs as the user running Mercurial,
must be run on the host with the Bugzilla install, and requires
permission to read Bugzilla configuration details and the necessary
MySQL user and password to have full access rights to the Bugzilla
database. For these reasons this access mode is now considered
deprecated, and will not be updated for new Bugzilla versions going
forward. Only adding comments is supported in this access mode.
Access via XMLRPC needs a Bugzilla username and password to be specified
in the configuration. Comments are added under that username. Since the
configuration must be readable by all Mercurial users, it is recommended
that the rights of that user are restricted in Bugzilla to the minimum
necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
email to the Bugzilla email interface to submit comments to bugs.
The From: address in the email is set to the email address of the Mercurial
user, so the comment appears to come from the Mercurial user. In the event
that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
user, the email associated with the Bugzilla username used to log into
Bugzilla is used instead as the source of the comment. Marking bugs fixed
works on all supported Bugzilla versions.
Configuration items common to all access modes:
bugzilla.version
The access type to use. Values recognized are:
:``xmlrpc``: Bugzilla XMLRPC interface.
:``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
:``3.0``: MySQL access, Bugzilla 3.0 and later.
:``2.18``: MySQL access, Bugzilla 2.18 and up to but not
including 3.0.
:``2.16``: MySQL access, Bugzilla 2.16 and up to but not
including 2.18.
bugzilla.regexp
Regular expression to match bug IDs for update in changeset commit message.
It must contain one "()" named group ``<ids>`` containing the bug
IDs separated by non-digit characters. It may also contain
a named group ``<hours>`` with a floating-point number giving the
hours worked on the bug. If no named groups are present, the first
"()" group is assumed to contain the bug IDs, and work time is not
updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
variations thereof, followed by an hours number prefixed by ``h`` or
``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
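For example, a commit message containing ``Bug 1234 and 5678 hours 1.5``
updates bugs 1234 and 5678, logging 1.5 hours of work against each.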
bugzilla.fixregexp
Regular expression to match bug IDs for marking fixed in changeset
commit message. This must contain a "()" named group ``<ids>`` containing
the bug IDs separated by non-digit characters. It may also contain
a named group ``<hours>`` with a floating-point number giving the
hours worked on the bug. If no named groups are present, the first
"()" group is assumed to contain the bug IDs, and work time is not
updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
variations thereof, followed by an hours number prefixed by ``h`` or
``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
bugzilla.fixstatus
The status to set a bug to when marking fixed. Default ``RESOLVED``.
bugzilla.fixresolution
The resolution to set a bug to when marking fixed. Default ``FIXED``.
bugzilla.style
The style file to use when formatting comments.
bugzilla.template
Template to use when formatting comments. Overrides style if
specified. In addition to the usual Mercurial keywords, the
extension specifies:
:``{bug}``: The Bugzilla bug ID.
:``{root}``: The full pathname of the Mercurial repository.
:``{webroot}``: Stripped pathname of the Mercurial repository.
:``{hgweb}``: Base URL for browsing Mercurial repositories.
Default ``changeset {node|short} in repo {root} refers to bug
{bug}.\\ndetails:\\n\\t{desc|tabindent}``
bugzilla.strip
The number of path separator characters to strip from the front of
the Mercurial repository path (``{root}`` in templates) to produce
``{webroot}``. For example, a repository with ``{root}``
``/var/local/my-project`` with a strip of 2 gives a value for
``{webroot}`` of ``my-project``. Default 0.
web.baseurl
Base URL for browsing Mercurial repositories. Referenced from
templates as ``{hgweb}``.
Configuration items common to XMLRPC+email and MySQL access modes:
bugzilla.usermap
Path of file containing Mercurial committer email to Bugzilla user email
mappings. If specified, the file should contain one mapping per
line::
committer = Bugzilla user
See also the ``[usermap]`` section.
The ``[usermap]`` section is used to specify mappings of Mercurial
committer email to Bugzilla user email. See also ``bugzilla.usermap``.
Contains entries of the form ``committer = Bugzilla user``.
XMLRPC access mode configuration:
bugzilla.bzurl
The base URL for the Bugzilla installation.
Default ``http://localhost/bugzilla``.
bugzilla.user
The username to use to log into Bugzilla via XMLRPC. Default
``bugs``.
bugzilla.password
The password for Bugzilla login.
XMLRPC+email access mode uses the XMLRPC access mode configuration items,
and also:
bugzilla.bzemail
The Bugzilla email address.
In addition, the Mercurial email settings must be configured. See the
documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
MySQL access mode configuration:
bugzilla.host
Hostname of the MySQL server holding the Bugzilla database.
Default ``localhost``.
bugzilla.db
Name of the Bugzilla database in MySQL. Default ``bugs``.
bugzilla.user
Username to use to access MySQL server. Default ``bugs``.
bugzilla.password
Password to use to access MySQL server.
bugzilla.timeout
Database connection timeout (seconds). Default 5.
bugzilla.bzuser
Fallback Bugzilla user name to record comments with, if changeset
committer cannot be found as a Bugzilla user.
bugzilla.bzdir
Bugzilla install directory. Used by default notify. Default
``/var/www/html/bugzilla``.
bugzilla.notify
The command to run to get Bugzilla to send bug change notification
emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
id) and ``user`` (committer bugzilla email). Default depends on
version; from 2.18 it is "cd %(bzdir)s && perl -T
contrib/sendbugmail.pl %(id)s %(user)s".
Activating the extension::
[extensions]
bugzilla =
[hooks]
# run bugzilla hook on every change pulled or pushed in here
incoming.bugzilla = python:hgext.bugzilla.hook
Example configurations:
XMLRPC example configuration. This uses the Bugzilla at
``http://my-project.org/bugzilla``, logging in as user
``[email protected]`` with password ``plugh``. It is used with a
collection of Mercurial repositories in ``/var/local/hg/repos/``,
with a web interface at ``http://my-project.org/hg``. ::
[bugzilla]
bzurl=http://my-project.org/bugzilla
user=[email protected]
password=plugh
version=xmlrpc
template=Changeset {node|short} in {root|basename}.
{hgweb}/{webroot}/rev/{node|short}\\n
{desc}\\n
strip=5
[web]
baseurl=http://my-project.org/hg
XMLRPC+email example configuration. This uses the Bugzilla at
``http://my-project.org/bugzilla``, logging in as user
``[email protected]`` with password ``plugh``. It is used with a
collection of Mercurial repositories in ``/var/local/hg/repos/``,
with a web interface at ``http://my-project.org/hg``. Bug comments
are sent to the Bugzilla email address
``[email protected]``. ::
[bugzilla]
bzurl=http://my-project.org/bugzilla
user=[email protected]
password=plugh
version=xmlrpc
bzemail=[email protected]
template=Changeset {node|short} in {root|basename}.
{hgweb}/{webroot}/rev/{node|short}\\n
{desc}\\n
strip=5
[web]
baseurl=http://my-project.org/hg
[usermap]
[email protected]=[email protected]
MySQL example configuration. This has a local Bugzilla 3.2 installation
in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
the Bugzilla database name is ``bugs`` and MySQL is
accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
with a web interface at ``http://my-project.org/hg``. ::
[bugzilla]
host=localhost
password=XYZZY
version=3.0
bzuser=[email protected]
bzdir=/opt/bugzilla-3.2
template=Changeset {node|short} in {root|basename}.
{hgweb}/{webroot}/rev/{node|short}\\n
{desc}\\n
strip=5
[web]
baseurl=http://my-project.org/hg
[usermap]
[email protected]=[email protected]
All the above add a comment to the Bugzilla bug record of the form::
Changeset 3b16791d6642 in repository-name.
http://my-project.org/hg/repository-name/rev/3b16791d6642
Changeset commit comment. Bug 1234.
'''
from mercurial.i18n import _
from mercurial.node import short
from mercurial import cmdutil, mail, templater, util
import re, time, urlparse, xmlrpclib
testedwith = 'internal'
class bzaccess(object):
'''Base class for access to Bugzilla.'''
def __init__(self, ui):
self.ui = ui
usermap = self.ui.config('bugzilla', 'usermap')
if usermap:
self.ui.readconfig(usermap, sections=['usermap'])
def map_committer(self, user):
'''map name of committer to Bugzilla user name.'''
for committer, bzuser in self.ui.configitems('usermap'):
if committer.lower() == user.lower():
return bzuser
return user
# Methods to be implemented by access classes.
#
# 'bugs' is a dict keyed on bug id, where values are a dict holding
# updates to bug state. Recognized dict keys are:
#
# 'hours': Value, float containing work hours to be updated.
# 'fix': If key present, bug is to be marked fixed. Value ignored.
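# For example, {1234: {'hours': 1.5, 'fix': None}} logs 1.5 hours of work
# against bug 1234 and marks it fixed.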
def filter_real_bug_ids(self, bugs):
'''remove bug IDs that do not exist in Bugzilla from bugs.'''
pass
def filter_cset_known_bug_ids(self, node, bugs):
'''remove bug IDs where node occurs in comment text from bugs.'''
pass
def updatebug(self, bugid, newstate, text, committer):
'''update the specified bug. Add comment text and set new states.
If possible add the comment as being from the committer of
the changeset. Otherwise use the default Bugzilla user.
'''
pass
def notify(self, bugs, committer):
'''Force sending of Bugzilla notification emails.
Only required if the access method does not trigger notification
emails automatically.
'''
pass
# Bugzilla via direct access to MySQL database.
class bzmysql(bzaccess):
'''Support for direct MySQL access to Bugzilla.
The earliest Bugzilla version this is tested with is version 2.16.
If your Bugzilla is version 3.4 or above, you are strongly
recommended to use the XMLRPC access method instead.
'''
@staticmethod
def sql_buglist(ids):
'''return SQL-friendly list of bug ids'''
return '(' + ','.join(map(str, ids)) + ')'
_MySQLdb = None
def __init__(self, ui):
try:
import MySQLdb as mysql
bzmysql._MySQLdb = mysql
except ImportError, err:
raise util.Abort(_('python mysql support not available: %s') % err)
bzaccess.__init__(self, ui)
host = self.ui.config('bugzilla', 'host', 'localhost')
user = self.ui.config('bugzilla', 'user', 'bugs')
passwd = self.ui.config('bugzilla', 'password')
db = self.ui.config('bugzilla', 'db', 'bugs')
timeout = int(self.ui.config('bugzilla', 'timeout', 5))
self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
(host, db, user, '*' * len(passwd)))
self.conn = bzmysql._MySQLdb.connect(host=host,
user=user, passwd=passwd,
db=db,
connect_timeout=timeout)
self.cursor = self.conn.cursor()
self.longdesc_id = self.get_longdesc_id()
self.user_ids = {}
self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
def run(self, *args, **kwargs):
'''run a query.'''
self.ui.note(_('query: %s %s\n') % (args, kwargs))
try:
self.cursor.execute(*args, **kwargs)
except bzmysql._MySQLdb.MySQLError:
self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
raise
def get_longdesc_id(self):
'''get identity of longdesc field'''
self.run('select fieldid from fielddefs where name = "longdesc"')
ids = self.cursor.fetchall()
if len(ids) != 1:
raise util.Abort(_('unknown database schema'))
return ids[0][0]
def filter_real_bug_ids(self, bugs):
'''filter not-existing bugs from set.'''
self.run('select bug_id from bugs where bug_id in %s' %
bzmysql.sql_buglist(bugs.keys()))
existing = [id for (id,) in self.cursor.fetchall()]
for id in bugs.keys():
if id not in existing:
self.ui.status(_('bug %d does not exist\n') % id)
del bugs[id]
def filter_cset_known_bug_ids(self, node, bugs):
'''filter bug ids that already refer to this changeset from set.'''
self.run('''select bug_id from longdescs where
bug_id in %s and thetext like "%%%s%%"''' %
(bzmysql.sql_buglist(bugs.keys()), short(node)))
for (id,) in self.cursor.fetchall():
self.ui.status(_('bug %d already knows about changeset %s\n') %
(id, short(node)))
del bugs[id]
def notify(self, bugs, committer):
'''tell bugzilla to send mail.'''
self.ui.status(_('telling bugzilla to send mail:\n'))
(user, userid) = self.get_bugzilla_user(committer)
for id in bugs.keys():
self.ui.status(_(' bug %s\n') % id)
cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
bzdir = self.ui.config('bugzilla', 'bzdir',
'/var/www/html/bugzilla')
try:
# Backwards-compatible with old notify string, which
# took one string. This will throw with a new format
# string.
cmd = cmdfmt % id
except TypeError:
cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
self.ui.note(_('running notify command %s\n') % cmd)
fp = util.popen('(%s) 2>&1' % cmd)
out = fp.read()
ret = fp.close()
if ret:
self.ui.warn(out)
raise util.Abort(_('bugzilla notify command %s') %
util.explainexit(ret)[0])
self.ui.status(_('done\n'))
def get_user_id(self, user):
'''look up numeric bugzilla user id.'''
try:
return self.user_ids[user]
except KeyError:
try:
userid = int(user)
except ValueError:
self.ui.note(_('looking up user %s\n') % user)
self.run('''select userid from profiles
where login_name like %s''', user)
all = self.cursor.fetchall()
if len(all) != 1:
raise KeyError(user)
userid = int(all[0][0])
self.user_ids[user] = userid
return userid
def get_bugzilla_user(self, committer):
'''See if committer is a registered bugzilla user. Return
bugzilla username and userid if so. If not, return default
bugzilla username and userid.'''
user = self.map_committer(committer)
try:
userid = self.get_user_id(user)
except KeyError:
try:
defaultuser = self.ui.config('bugzilla', 'bzuser')
if not defaultuser:
raise util.Abort(_('cannot find bugzilla user id for %s') %
user)
userid = self.get_user_id(defaultuser)
user = defaultuser
except KeyError:
raise util.Abort(_('cannot find bugzilla user id for %s or %s')
% (user, defaultuser))
return (user, userid)
def updatebug(self, bugid, newstate, text, committer):
'''update bug state with comment text.
Try adding comment as committer of changeset, otherwise as
default bugzilla user.'''
if len(newstate) > 0:
self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
(user, userid) = self.get_bugzilla_user(committer)
now = time.strftime('%Y-%m-%d %H:%M:%S')
self.run('''insert into longdescs
(bug_id, who, bug_when, thetext)
values (%s, %s, %s, %s)''',
(bugid, userid, now, text))
self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
values (%s, %s, %s, %s)''',
(bugid, userid, now, self.longdesc_id))
self.conn.commit()
class bzmysql_2_18(bzmysql):
'''support for bugzilla 2.18 series.'''
def __init__(self, ui):
bzmysql.__init__(self, ui)
self.default_notify = \
"cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
class bzmysql_3_0(bzmysql_2_18):
'''support for bugzilla 3.0 series.'''
def __init__(self, ui):
bzmysql_2_18.__init__(self, ui)
def get_longdesc_id(self):
'''get identity of longdesc field'''
self.run('select id from fielddefs where name = "longdesc"')
ids = self.cursor.fetchall()
if len(ids) != 1:
raise util.Abort(_('unknown database schema'))
return ids[0][0]
# Bugzilla via XMLRPC interface.
class cookietransportrequest(object):
"""A Transport request method that retains cookies over its lifetime.
The regular xmlrpclib transports ignore cookies. Which causes
a bit of a problem when you need a cookie-based login, as with
the Bugzilla XMLRPC interface.
So this is a helper for defining a Transport which looks for
cookies being set in responses and saves them to add to all future
requests.
"""
# Inspiration drawn from
# http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
# http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
cookies = []
def send_cookies(self, connection):
if self.cookies:
for cookie in self.cookies:
connection.putheader("Cookie", cookie)
def request(self, host, handler, request_body, verbose=0):
self.verbose = verbose
self.accept_gzip_encoding = False
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_cookies(h)
self.send_user_agent(h)
self.send_content(h, request_body)
# Deal with differences between Python 2.4-2.6 and 2.7.
# In the former h is a HTTP(S). In the latter it's a
# HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
# HTTP(S) has an underlying HTTP(S)Connection, so extract
# that and use it.
try:
response = h.getresponse()
except AttributeError:
response = h._conn.getresponse()
# Add any cookie definitions to our list.
for header in response.msg.getallmatchingheaders("Set-Cookie"):
val = header.split(": ", 1)[1]
cookie = val.split(";", 1)[0]
self.cookies.append(cookie)
if response.status != 200:
raise xmlrpclib.ProtocolError(host + handler, response.status,
response.reason, response.msg.headers)
payload = response.read()
parser, unmarshaller = self.getparser()
parser.feed(payload)
parser.close()
return unmarshaller.close()
# The explicit calls to the underlying xmlrpclib __init__() methods are
# necessary. The xmlrpclib.Transport classes are old-style classes, and
# it turns out their __init__() doesn't get called when doing multiple
# inheritance with a new-style class.
class cookietransport(cookietransportrequest, xmlrpclib.Transport):
def __init__(self, use_datetime=0):
if util.safehasattr(xmlrpclib.Transport, "__init__"):
xmlrpclib.Transport.__init__(self, use_datetime)
class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
def __init__(self, use_datetime=0):
if util.safehasattr(xmlrpclib.Transport, "__init__"):
xmlrpclib.SafeTransport.__init__(self, use_datetime)
class bzxmlrpc(bzaccess):
"""Support for access to Bugzilla via the Bugzilla XMLRPC API.
Requires a minimum Bugzilla version 3.4.
"""
def __init__(self, ui):
bzaccess.__init__(self, ui)
bzweb = self.ui.config('bugzilla', 'bzurl',
'http://localhost/bugzilla/')
bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"
user = self.ui.config('bugzilla', 'user', 'bugs')
passwd = self.ui.config('bugzilla', 'password')
self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
'FIXED')
self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
ver = self.bzproxy.Bugzilla.version()['version'].split('.')
self.bzvermajor = int(ver[0])
self.bzverminor = int(ver[1])
self.bzproxy.User.login(dict(login=user, password=passwd))
def transport(self, uri):
if urlparse.urlparse(uri, "http")[0] == "https":
return cookiesafetransport()
else:
return cookietransport()
def get_bug_comments(self, id):
"""Return a string with all comment text for a bug."""
c = self.bzproxy.Bug.comments(dict(ids=[id], include_fields=['text']))
return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
def filter_real_bug_ids(self, bugs):
probe = self.bzproxy.Bug.get(dict(ids=sorted(bugs.keys()),
include_fields=[],
permissive=True))
for badbug in probe['faults']:
id = badbug['id']
self.ui.status(_('bug %d does not exist\n') % id)
del bugs[id]
def filter_cset_known_bug_ids(self, node, bugs):
for id in sorted(bugs.keys()):
if self.get_bug_comments(id).find(short(node)) != -1:
self.ui.status(_('bug %d already knows about changeset %s\n') %
(id, short(node)))
del bugs[id]
def updatebug(self, bugid, newstate, text, committer):
args = {}
if 'hours' in newstate:
args['work_time'] = newstate['hours']
if self.bzvermajor >= 4:
args['ids'] = [bugid]
args['comment'] = {'body' : text}
if 'fix' in newstate:
args['status'] = self.fixstatus
args['resolution'] = self.fixresolution
self.bzproxy.Bug.update(args)
else:
if 'fix' in newstate:
self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
"to mark bugs fixed\n"))
args['id'] = bugid
args['comment'] = text
self.bzproxy.Bug.add_comment(args)
class bzxmlrpcemail(bzxmlrpc):
"""Read data from Bugzilla via XMLRPC, send updates via email.
Advantages of sending updates via email:
1. Comments can be added as any user, not just logged in user.
2. Bug statuses or other fields not accessible via XMLRPC can
potentially be updated.
There is no XMLRPC function to change bug status before Bugzilla
4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
But bugs can be marked fixed via email from 3.4 onwards.
"""
# The email interface changes subtly between 3.4 and 3.6. In 3.4,
# in-email fields are specified as '@<fieldname> = <value>'. In
# 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
# in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
# compatibility, but rather than rely on this use the new format for
# 4.0 onwards.
def __init__(self, ui):
bzxmlrpc.__init__(self, ui)
self.bzemail = self.ui.config('bugzilla', 'bzemail')
if not self.bzemail:
raise util.Abort(_("configuration 'bzemail' missing"))
mail.validateconfig(self.ui)
def makecommandline(self, fieldname, value):
if self.bzvermajor >= 4:
return "@%s %s" % (fieldname, str(value))
else:
if fieldname == "id":
fieldname = "bug_id"
return "@%s = %s" % (fieldname, str(value))
def send_bug_modify_email(self, bugid, commands, comment, committer):
'''send modification message to Bugzilla bug via email.
The message format is documented in the Bugzilla email_in.pl
specification. commands is a list of command lines, comment is the
comment text.
To stop users from crafting commit comments with
Bugzilla commands, specify the bug ID via the message body, rather
than the subject line, and leave a blank line after it.
'''
user = self.map_committer(committer)
matches = self.bzproxy.User.get(dict(match=[user]))
if not matches['users']:
user = self.ui.config('bugzilla', 'user', 'bugs')
matches = self.bzproxy.User.get(dict(match=[user]))
if not matches['users']:
raise util.Abort(_("default bugzilla user %s email not found") %
user)
user = matches['users'][0]['email']
commands.append(self.makecommandline("id", bugid))
text = "\n".join(commands) + "\n\n" + comment
_charsets = mail._charsets(self.ui)
user = mail.addressencode(self.ui, user, _charsets)
bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
msg = mail.mimeencode(self.ui, text, _charsets)
msg['From'] = user
msg['To'] = bzemail
msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
sendmail = mail.connect(self.ui)
sendmail(user, bzemail, msg.as_string())
def updatebug(self, bugid, newstate, text, committer):
cmds = []
if 'hours' in newstate:
cmds.append(self.makecommandline("work_time", newstate['hours']))
if 'fix' in newstate:
cmds.append(self.makecommandline("bug_status", self.fixstatus))
cmds.append(self.makecommandline("resolution", self.fixresolution))
self.send_bug_modify_email(bugid, cmds, text, committer)
class bugzilla(object):
# supported versions of bugzilla. different versions have
# different schemas.
_versions = {
'2.16': bzmysql,
'2.18': bzmysql_2_18,
'3.0': bzmysql_3_0,
'xmlrpc': bzxmlrpc,
'xmlrpc+email': bzxmlrpcemail
}
_default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
_default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
r'(?:nos?\.?|num(?:ber)?s?)?\s*'
r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
_bz = None
def __init__(self, ui, repo):
self.ui = ui
self.repo = repo
def bz(self):
'''return object that knows how to talk to bugzilla version in
use.'''
if bugzilla._bz is None:
bzversion = self.ui.config('bugzilla', 'version')
try:
bzclass = bugzilla._versions[bzversion]
except KeyError:
raise util.Abort(_('bugzilla version %s not supported') %
bzversion)
bugzilla._bz = bzclass(self.ui)
return bugzilla._bz
def __getattr__(self, key):
return getattr(self.bz(), key)
_bug_re = None
_fix_re = None
_split_re = None
def find_bugs(self, ctx):
'''return bugs dictionary created from commit comment.
Extract bug info from changeset comments. Filter out any that are
not known to Bugzilla, and any that already have a reference to
the given changeset in their comments.
'''
if bugzilla._bug_re is None:
bugzilla._bug_re = re.compile(
self.ui.config('bugzilla', 'regexp',
bugzilla._default_bug_re), re.IGNORECASE)
bugzilla._fix_re = re.compile(
self.ui.config('bugzilla', 'fixregexp',
bugzilla._default_fix_re), re.IGNORECASE)
bugzilla._split_re = re.compile(r'\D+')
start = 0
hours = 0.0
bugs = {}
bugmatch = bugzilla._bug_re.search(ctx.description(), start)
fixmatch = bugzilla._fix_re.search(ctx.description(), start)
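# Scan both regexps through the description, always consuming whichever
# match starts first, so plain bug references and "fix" references are
# handled in textual order.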
while True:
bugattribs = {}
if not bugmatch and not fixmatch:
break
if not bugmatch:
m = fixmatch
elif not fixmatch:
m = bugmatch
else:
if bugmatch.start() < fixmatch.start():
m = bugmatch
else:
m = fixmatch
start = m.end()
if m is bugmatch:
bugmatch = bugzilla._bug_re.search(ctx.description(), start)
if 'fix' in bugattribs:
del bugattribs['fix']
else:
fixmatch = bugzilla._fix_re.search(ctx.description(), start)
bugattribs['fix'] = None
try:
ids = m.group('ids')
except IndexError:
ids = m.group(1)
try:
hours = float(m.group('hours'))
bugattribs['hours'] = hours
except IndexError:
pass
except TypeError:
pass
except ValueError:
self.ui.status(_("%s: invalid hours\n") % m.group('hours'))
for id in bugzilla._split_re.split(ids):
if not id:
continue
bugs[int(id)] = bugattribs
if bugs:
self.filter_real_bug_ids(bugs)
if bugs:
self.filter_cset_known_bug_ids(ctx.node(), bugs)
return bugs
def update(self, bugid, newstate, ctx):
'''update bugzilla bug with reference to changeset.'''
def webroot(root):
'''strip leading prefix of repo root and turn into
url-safe path.'''
count = int(self.ui.config('bugzilla', 'strip', 0))
root = util.pconvert(root)
while count > 0:
c = root.find('/')
if c == -1:
break
root = root[c + 1:]
count -= 1
return root
mapfile = self.ui.config('bugzilla', 'style')
tmpl = self.ui.config('bugzilla', 'template')
t = cmdutil.changeset_templater(self.ui, self.repo,
False, None, mapfile, False)
if not mapfile and not tmpl:
tmpl = _('changeset {node|short} in repo {root} refers '
'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
if tmpl:
tmpl = templater.parsestring(tmpl, quoted=False)
t.use_template(tmpl)
self.ui.pushbuffer()
t.show(ctx, changes=ctx.changeset(),
bug=str(bugid),
hgweb=self.ui.config('web', 'baseurl'),
root=self.repo.root,
webroot=webroot(self.repo.root))
data = self.ui.popbuffer()
self.updatebug(bugid, newstate, data, util.email(ctx.user()))
def hook(ui, repo, hooktype, node=None, **kwargs):
'''add comment to bugzilla for each changeset that refers to a
bugzilla bug id. only add a comment once per bug, so same change
seen multiple times does not fill bug with duplicate data.'''
if node is None:
raise util.Abort(_('hook type %s does not pass a changeset id') %
hooktype)
try:
bz = bugzilla(ui, repo)
ctx = repo[node]
bugs = bz.find_bugs(ctx)
if bugs:
for bug in bugs:
bz.update(bug, bugs[bug], ctx)
bz.notify(bugs, util.email(ctx.user()))
except Exception, e:
raise util.Abort(_('Bugzilla error: %s') % e)
| apache-2.0 |
spaceone/mils-secure | app/rapi.py | 2 | 18254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import configuration as config
import logging
from google.appengine.api import users, memcache
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp.util import run_wsgi_app
from lovely.jsonrpc import wsgi
import models
from utils import dec, parse_iso_datetime_string, get_iso_datetime_string, queue_task, queue_mail_task
from data.countries import COUNTRY_NAME_ISO_ALPHA_3_TABLE
from datetime import datetime
logging.basicConfig(level=logging.DEBUG)
# Keep the timeout short because the admin expects the "freshest" data at "all" times.
DEFAULT_CACHE_TIMEOUT = 5 # seconds
def toggle_active(key):
item = db.get(db.Key(key))
item.is_active = not item.is_active
item.put()
return item.is_active
def toggle_starred(key):
item = db.get(db.Key(key))
item.is_starred = not item.is_starred
item.put()
return item.is_starred
def toggle_deleted(key):
item = db.get(db.Key(key))
item.is_deleted = not item.is_deleted
item.put()
return item.is_deleted
def toggle_premium(key):
item = db.get(db.Key(key))
item.is_premium = not item.is_premium
item.put()
return item.is_premium
def toggle_draft(key):
item = db.get(db.Key(key))
item.is_draft = not item.is_draft
item.put()
return item.is_draft
def toggle_keys_active(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = not item.is_active
item_list.append(item)
db.put(item_list)
return keys
def toggle_keys_starred(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = not item.is_starred
item_list.append(item)
db.put(item_list)
return keys
def activate_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = True
item_list.append(item)
db.put(item_list)
return keys
def activate_user_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = True
item.wants_activation = False
item_list.append(item)
queue_mail_task(url='/worker/mail/account_activation_notification/' + key, method='GET')
db.put(item_list)
return keys
def deactivate_user_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = False
item.wants_activation = False
item_list.append(item)
db.put(item_list)
return keys
def deactivate_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_active = False
item_list.append(item)
db.put(item_list)
return keys
def publish_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_draft = False
item_list.append(item)
db.put(item_list)
return keys
def draft_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_draft = True
item_list.append(item)
db.put(item_list)
return keys
def regularize_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_premium = False
item_list.append(item)
db.put(item_list)
return keys
def premiumize_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_premium = True
item_list.append(item)
db.put(item_list)
return keys
def star_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = True
item_list.append(item)
db.put(item_list)
return keys
def unstar_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_starred = False
item_list.append(item)
db.put(item_list)
return keys
def delete_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_deleted = True
item_list.append(item)
db.put(item_list)
return keys
def undelete_keys(keys):
item_list = []
for key in keys:
item = db.get(db.Key(key))
item.is_deleted = False
item_list.append(item)
db.put(item_list)
return keys
delete_users = delete_keys
undelete_users = undelete_keys
star_users = star_keys
unstar_users = unstar_keys
def get_person_from_user(key):
cache_key = 'json.get_person_from_user(' + key + ')'
cached_result = memcache.get(cache_key)
if cached_result:
return cached_result
else:
user = db.get(db.Key(key))
person = user.people_singleton[0]
host_info = db.Query(models.UserHostInformation).filter('user = ', user).get() #user.host_information_set
phones = []
for phone in person.phones:
phones.append(dict(
key = str(phone.key()),
phone_number = phone.number,
phone_type = phone.phone_type
))
addresses = []
for address in person.addresses:
addresses.append(dict(
key = str(address.key()),
address_type = address.address_type,
#apartment = address.apartment,
#state_province = address.state_province,
#city = address.city,
#zip_code = address.zip_code,
#street_name = address.street_name,
#country_code = address.country,
#country_name = COUNTRY_NAME_ISO_ALPHA_3_TABLE.get(address.country, 'Unknown Country'),
#landmark = address.landmark,
#nearest_railway_line = address.nearest_railway_line,
address_line = address.address_line
))
corporate_email = user.corporate_email
if not corporate_email:
corporate_email = ''
retval = dict(
key = str(person.key()),
user_key = str(person.user.key()),
signin_email = user.signin_email,
corporate_email = corporate_email,
first_name = person.first_name,
last_name = person.last_name,
gender = person.gender,
company = person.company,
designation = person.designation,
graduation_year = person.graduation_year,
t_shirt_size = person.t_shirt_size,
birthdate = get_iso_datetime_string(person.birthdate),
addresses = addresses,
phones = phones,
is_student = person.is_student,
when_created = get_iso_datetime_string(user.when_created),
http_user_agent = host_info.http_user_agent
)
memcache.set(cache_key, retval, DEFAULT_CACHE_TIMEOUT)
return retval
def get_users():
cache_key = 'api.get_users'
cached_user_list = memcache.get(cache_key)
if cached_user_list:
return cached_user_list
else:
user_list = []
users = models.User.all().order('nickname').fetch(models.FETCH_ALL_VALUES)
for user in users:
person = user.people_singleton[0]
user_list.append(dict(username=user.username,
email=user.email,
signin_email=user.signin_email,
corporate_email=user.corporate_email,
nickname=user.nickname,
key=str(user.key()),
is_active=user.is_active,
is_deleted=user.is_deleted,
is_starred=user.is_starred,
wants_activation=user.wants_activation,
is_premium=user.is_premium,
auth_provider=user.auth_provider,
person_key=str(person.key()),
graduation_year=person.graduation_year,
when_created=get_iso_datetime_string(user.when_created)
))
memcache.set(cache_key, user_list, DEFAULT_CACHE_TIMEOUT)
return user_list
def get_books():
book_list = []
books = models.Book.all().order('title').fetch(models.FETCH_ALL_VALUES)
for book in books:
book_list.append(dict(title=book.title,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
author_name=book.author_name,
key=str(book.key()),
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
))
return book_list
def get_book(key):
book = db.get(db.Key(key))
return dict(key=str(book.key()),
title=book.title,
author_name=book.author_name,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
)
def is_openlibrary_cover_available(isbn):
isbn = str(isbn)
cache_key = 'cover_for_' + isbn
cached_value = memcache.get(cache_key)
if cached_value in (True, False):
return cached_value
else:
from google.appengine.api import urlfetch
cover_url = 'http://covers.openlibrary.org/b/isbn/' + isbn + '-S.jpg?default=false'
result = urlfetch.fetch(cover_url)
retval = False
if result.status_code == 200:
retval = True
memcache.set(cache_key, retval, DEFAULT_CACHE_TIMEOUT)
return retval
def save_book(key='', title='', author_name='', isbn_10='', isbn_13='', info_url=''):
if key:
book = db.get(db.Key(key))
else:
book = models.Book()
book.title = title
book.author_name = author_name
book.isbn_10 = isbn_10
book.isbn_13 = isbn_13
if info_url:
book.info_url = info_url
book.put()
return dict(key=str(book.key()),
title=book.title,
author_name=book.author_name,
isbn_10=book.isbn_10,
isbn_13=book.isbn_13,
is_active=book.is_active,
is_starred=book.is_starred,
is_deleted=book.is_deleted,
info_url=book.info_url
)
def get_articles():
cache_key = 'api.get_articles'
cached_articles = memcache.get(cache_key)
if cached_articles:
return cached_articles
else:
articles_list = []
articles = models.Article.all().order('-when_published').fetch(models.FETCH_ALL_VALUES)
for article in articles:
articles_list.append(dict(title=article.title,
is_draft=article.is_draft,
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
key=str(article.key()),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
is_starred=article.is_starred,
is_deleted=article.is_deleted
))
memcache.set(cache_key, articles_list, DEFAULT_CACHE_TIMEOUT)
return articles_list
def get_article(key):
article = db.get(db.Key(key))
return dict(title=article.title,
is_draft=article.is_draft,
key=str(article.key()),
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
content=article.content,
is_starred=article.is_starred,
is_deleted=article.is_deleted
)
def get_article_content(key):
article = db.get(db.Key(key))
return dict(key=key,
content=article.content
)
def save_article(key='', title='', content='', is_draft=''):
if key:
article = db.get(db.Key(key))
else:
article = models.Article()
article.title = title
article.content = content
article.is_draft = is_draft
article.author = users.get_current_user()
article.put()
return dict(title=article.title,
is_draft=article.is_draft,
key=str(article.key()),
when_published=get_iso_datetime_string(article.when_published),
when_created=get_iso_datetime_string(article.when_created),
author_nickname=article.author.nickname(),
author_email=article.author.email(),
#content=article.content,
is_starred=article.is_starred,
is_deleted=article.is_deleted
)
def save_training_program(key='', title='', venue='', faculty='',
when_from='',
when_to='',
when_registration_ends='',
participation_counts=[],
participation_fees=[]):
if key:
training_program = db.get(db.Key(key))
else:
        training_program = models.TrainingProgram()
training_program.title = title
training_program.venue = venue
training_program.faculty = faculty
training_program.when_from = parse_iso_datetime_string(when_from)
training_program.when_to = parse_iso_datetime_string(when_to)
training_program.when_registration_ends = parse_iso_datetime_string(when_registration_ends)
training_program.put()
fees = []
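    # Each fee arrives as a decimal string (e.g. "1500.50"); it is split into
    # integer and fractional parts, which are stored in two separate fields
    # on TrainingProgramFee.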
for count, fee in izip(participation_counts, participation_fees):
tpfee = models.TrainingProgramFee()
tpfee.for_participation_count = count
if '.' in fee:
fee_integer, fee_fraction = fee.split('.')
else:
fee_integer, fee_fraction = fee, '0'
tpfee.fee_integer = dec(fee_integer)
tpfee.fee_fraction = dec(fee_fraction)
tpfee.training_program = training_program
fees.append(tpfee)
db.put(fees)
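    # Note (from reading the code): this save path does not invalidate the
    # 'api.get_training_program*' memcache entries, so cached reads may stay
    # stale until DEFAULT_CACHE_TIMEOUT expires.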
def get_training_program(key):
cache_key = 'api.get_training_program.json.' + key
cached_value = memcache.get(cache_key)
if cached_value:
return cached_value
else:
training_program = db.get(db.Key(key))
fees = [fee.to_json_dict('fee_integer', 'fee_fraction', 'for_participants_count') for fee in training_program.fees]
training_program_json_dict = training_program.to_json_dict(
'title',
'venue',
'when_from',
'when_to',
'when_registration_ends',
'max_participants',
'faculty',
'is_starred',
'is_deleted',
'is_active'
)
training_program_json_dict['fees'] = fees
memcache.set(cache_key, training_program_json_dict, DEFAULT_CACHE_TIMEOUT)
return training_program_json_dict
def get_training_programs():
cache_key = 'api.get_training_programs'
cached_values = memcache.get(cache_key)
if cached_values:
return cached_values
else:
training_programs = models.TrainingProgram.get_all()
training_programs_list = []
for training_program in training_programs:
fees = [fee.to_json_dict('fee_integer', 'fee_fraction', 'for_participants_count') for fee in training_program.fees]
training_program_json_dict = training_program.to_json_dict(
'title',
'venue',
'when_from',
'when_to',
'when_registration_ends',
'max_participants',
'faculty',
'is_starred',
'is_deleted',
'is_active')
training_program_json_dict['fees'] = fees
training_programs_list.append(training_program_json_dict)
memcache.set(cache_key, training_programs_list, DEFAULT_CACHE_TIMEOUT)
return training_programs_list
def main():
application = wsgi.WSGIJSONRPCApplication()
application.register_method(activate_keys, 'activate_keys')
application.register_method(deactivate_keys, 'deactivate_keys')
application.register_method(star_keys, 'star_keys')
application.register_method(unstar_keys, 'unstar_keys')
application.register_method(delete_keys, 'delete_keys')
application.register_method(undelete_keys, 'undelete_keys')
application.register_method(toggle_starred, 'toggle_starred')
application.register_method(toggle_active, 'toggle_active')
application.register_method(toggle_deleted, 'toggle_deleted')
application.register_method(toggle_premium, 'toggle_premium')
application.register_method(toggle_draft, 'toggle_draft')
application.register_method(regularize_keys, 'regularize_keys')
application.register_method(premiumize_keys, 'premiumize_keys')
application.register_method(publish_keys, 'publish_keys')
application.register_method(draft_keys, 'draft_keys')
application.register_method(toggle_keys_starred, 'toggle_keys_starred')
application.register_method(toggle_keys_active, 'toggle_keys_active')
application.register_method(activate_user_keys, 'activate_user_keys')
application.register_method(deactivate_user_keys, 'deactivate_user_keys')
application.register_method(get_person_from_user, 'get_person_from_user')
application.register_method(get_users, 'get_users')
application.register_method(get_articles, 'get_articles')
application.register_method(get_article, 'get_article')
application.register_method(save_article, 'save_article')
application.register_method(get_article_content, 'get_article_content')
application.register_method(get_books, 'get_books')
application.register_method(get_book, 'get_book')
application.register_method(save_book, 'save_book')
application.register_method(is_openlibrary_cover_available, 'is_cover_available')
application.register_method(get_training_programs, 'get_training_programs')
application.register_method(get_training_program, 'get_training_program')
application.register_method(save_training_program, 'save_training_program')
run_wsgi_app(application)
if __name__ == '__main__':
main()
| mit |
simod/geonode | scripts/backup-restore/gn20_to_24.py | 6 | 8830 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import re
import json
import datetime
from django.utils import timezone
class DefaultMangler(json.JSONDecoder):
""" TODO """
def __init__(self, *args, **kwargs):
self.basepk = kwargs.get('basepk', -1)
self.owner = kwargs.get('owner', 'admin')
self.datastore = kwargs.get('datastore', '')
self.siteurl = kwargs.get('siteurl', '')
super(DefaultMangler, self).__init__(*args)
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(DefaultMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
return default_obj
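    # Illustrative usage (a sketch, not from the original script): the
    # manglers are designed to be passed to json.loads as a custom decoder
    # class, e.g.
    #
    #   objects = json.loads(fixture_string, cls=ResourceBaseMangler,
    #                        basepk=1000, owner='admin',
    #                        siteurl='http://example.org')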
class ResourceBaseMangler(DefaultMangler):
""" TODO """
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(ResourceBaseMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
upload_sessions = []
for obj in default_obj:
obj['pk'] = obj['pk'] + self.basepk
obj['fields']['featured'] = False
obj['fields']['rating'] = 0
obj['fields']['popular_count'] = 0
obj['fields']['share_count'] = 0
obj['fields']['is_published'] = True
obj['fields']['thumbnail_url'] = ''
if 'distribution_url' in obj['fields']:
                if obj['fields']['distribution_url'] is not None and 'layers' in obj['fields']['distribution_url']:
obj['fields']['polymorphic_ctype'] = ["layers", "layer"]
try:
p = '(?P<protocol>http.*://)?(?P<host>[^:/ ]+).?(?P<port>[0-9]*)(?P<details_url>.*)'
m = re.search(p, obj['fields']['distribution_url'])
if 'http' in m.group('protocol'):
obj['fields']['detail_url'] = self.siteurl + m.group('details_url')
else:
obj['fields']['detail_url'] = self.siteurl + obj['fields']['distribution_url']
except:
obj['fields']['detail_url'] = obj['fields']['distribution_url']
else:
obj['fields']['polymorphic_ctype'] = ["maps", "map"]
try:
obj['fields'].pop("distribution_description", None)
except:
pass
try:
obj['fields'].pop("distribution_url", None)
except:
pass
try:
obj['fields'].pop("thumbnail", None)
except:
pass
upload_sessions.append(self.add_upload_session(obj['pk'], obj['fields']['owner']))
default_obj.extend(upload_sessions)
return default_obj
def add_upload_session(self, pk, owner):
obj = dict()
obj['pk'] = pk
obj['model'] = 'layers.uploadsession'
obj['fields'] = dict()
obj['fields']['user'] = owner
obj['fields']['traceback'] = None
obj['fields']['context'] = None
obj['fields']['error'] = None
obj['fields']['processed'] = True
        obj['fields']['date'] = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
return obj
class LayerMangler(DefaultMangler):
""" TODO """
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(LayerMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
for obj in default_obj:
obj['pk'] = obj['pk'] + self.basepk
            # Retrieve the ResourceBase associated with this Layer
from geonode.base.models import ResourceBase
resource = ResourceBase.objects.get(pk=obj['pk'])
obj['fields']['upload_session'] = obj['pk']
obj['fields']['service'] = None
obj['fields']['charset'] = "UTF-8"
obj['fields']['title_en'] = resource.title
obj['fields']['data_quality_statement_en'] = ""
obj['fields']['regions'] = []
obj['fields']['supplemental_information_en'] = "No information provided"
obj['fields']['abstract_en'] = "No abstract provided"
obj['fields']['purpose_en'] = ""
obj['fields']['constraints_other_en'] = ""
obj['fields']['default_style'] = None
if self.datastore:
obj['fields']['store'] = self.datastore
else:
obj['fields']['store'] = obj['fields']['name']
try:
obj['fields'].pop("popular_count", None)
except:
pass
try:
obj['fields'].pop("share_count", None)
except:
pass
try:
obj['fields'].pop("title", None)
except:
pass
return default_obj
class LayerAttributesMangler(DefaultMangler):
""" TODO """
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(LayerAttributesMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
for obj in default_obj:
obj['pk'] = obj['pk'] + self.basepk
obj['fields']['layer'] = obj['fields']['layer'] + self.basepk
return default_obj
class MapMangler(DefaultMangler):
""" TODO """
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(MapMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
for obj in default_obj:
obj['pk'] = obj['pk'] + self.basepk
            # Retrieve the ResourceBase associated with this Map
from geonode.base.models import ResourceBase
resource = ResourceBase.objects.get(pk=obj['pk'])
obj['fields']['urlsuffix'] = ""
obj['fields']['title_en'] = resource.title
obj['fields']['featuredurl'] = ""
obj['fields']['data_quality_statement_en'] = None
obj['fields']['supplemental_information_en'] = "No information provided"
obj['fields']['abstract_en'] = ""
obj['fields']['purpose_en'] = None
obj['fields']['constraints_other_en'] = None
try:
obj['fields'].pop("popular_count", None)
except:
pass
try:
obj['fields'].pop("share_count", None)
except:
pass
try:
obj['fields'].pop("title", None)
except:
pass
return default_obj
class MapLayersMangler(DefaultMangler):
""" TODO """
def default(self, obj):
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def decode(self, json_string):
"""
        json_string is basically the string that you give to the json.loads method.
"""
default_obj = super(MapLayersMangler, self).decode(json_string)
# manipulate your object any way you want
# ....
for obj in default_obj:
obj['pk'] = obj['pk'] + self.basepk
obj['fields']['map'] = obj['fields']['map'] + self.basepk
return default_obj
| gpl-3.0 |
sigma-random/pwnypack | tests/test_target.py | 1 | 2335 | import mock
from nose.tools import raises
import pwny
def test_default_arch_x86():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'i386'
assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_x86_64():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'x86_64'
assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_unknown():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'unknown'
assert pwny.Target().arch is pwny.Target.Arch.unknown
def test_default_arch_32bit():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('32bit',)
assert pwny.Target().bits is pwny.Target.Bits.bits_32
def test_default_arch_64bit():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('64bit',)
assert pwny.Target().bits is pwny.Target.Bits.bits_64
def test_set_arch():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('64bit',)
target = pwny.Target(arch=pwny.Target.Arch.x86)
assert target.arch is pwny.Target.Arch.x86
def test_default_endian():
assert pwny.Target().endian is pwny.Target.Endian.little
def test_set_endian():
target = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
assert target.endian is pwny.Target.Endian.big
def test_default_bits_x86():
target = pwny.Target(arch=pwny.Target.Arch.x86)
assert target.bits == 32
@raises(NotImplementedError)
def test_default_bits_unsupported():
target = pwny.Target(arch=pwny.Target.Arch.unknown)
_ = target.bits
def test_set_bits():
target = pwny.Target(arch=pwny.Target.Arch.x86, bits=64)
assert target.bits == 64
@raises(ValueError)
def test_set_invalid_bits():
pwny.Target(bits=33)
def test_target_assume():
target = pwny.Target()
target.assume(pwny.Target(arch=pwny.Target.Arch.arm, endian=pwny.Target.Endian.little, bits=64, mode=2))
assert target.arch is pwny.Target.Arch.arm and \
target.endian == pwny.Target.Endian.little and \
target.bits == 64 and \
target.mode == 2
| mit |
peterjoel/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/python/integration.py | 30 | 13137 | import pytest
from _pytest import python
from _pytest import runner
class TestOEJSKITSpecials(object):
def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage
testdir.makeconftest(
"""
import pytest
def pytest_pycollect_makeitem(collector, name, obj):
if name == "MyClass":
return MyCollector(name, parent=collector)
class MyCollector(pytest.Collector):
def reportinfo(self):
return self.fspath, 3, "xyz"
"""
)
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.fixture
def arg1(request):
return 42
class MyClass(object):
pass
"""
)
# this hook finds funcarg factories
rep = runner.collect_one_node(collector=modcol)
clscol = rep.result[0]
clscol.obj = lambda arg1: None
clscol.funcargs = {}
pytest._fillfuncargs(clscol)
assert clscol.funcargs["arg1"] == 42
def test_autouse_fixture(self, testdir): # rough jstests usage
testdir.makeconftest(
"""
import pytest
def pytest_pycollect_makeitem(collector, name, obj):
if name == "MyClass":
return MyCollector(name, parent=collector)
class MyCollector(pytest.Collector):
def reportinfo(self):
return self.fspath, 3, "xyz"
"""
)
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.fixture(autouse=True)
def hello():
pass
@pytest.fixture
def arg1(request):
return 42
class MyClass(object):
pass
"""
)
# this hook finds funcarg factories
rep = runner.collect_one_node(modcol)
clscol = rep.result[0]
clscol.obj = lambda: None
clscol.funcargs = {}
pytest._fillfuncargs(clscol)
assert not clscol.funcargs
def test_wrapped_getfslineno():
def func():
pass
def wrap(f):
func.__wrapped__ = f
func.patchings = ["qwe"]
return func
@wrap
def wrapped_func(x, y, z):
pass
fs, lineno = python.getfslineno(wrapped_func)
fs2, lineno2 = python.getfslineno(wrap)
assert lineno > lineno2, "getfslineno does not unwrap correctly"
class TestMockDecoration(object):
def test_wrapped_getfuncargnames(self):
from _pytest.compat import getfuncargnames
def wrap(f):
def func():
pass
func.__wrapped__ = f
return func
@wrap
def f(x):
pass
values = getfuncargnames(f)
assert values == ("x",)
@pytest.mark.xfail(
strict=False, reason="getfuncargnames breaks if mock is imported"
)
def test_wrapped_getfuncargnames_patching(self):
from _pytest.compat import getfuncargnames
def wrap(f):
def func():
pass
func.__wrapped__ = f
func.patchings = ["qwe"]
return func
@wrap
def f(x, y, z):
pass
values = getfuncargnames(f)
assert values == ("y", "z")
def test_unittest_mock(self, testdir):
pytest.importorskip("unittest.mock")
testdir.makepyfile(
"""
import unittest.mock
class T(unittest.TestCase):
@unittest.mock.patch("os.path.abspath")
def test_hello(self, abspath):
import os
os.path.abspath("hello")
abspath.assert_any_call("hello")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_unittest_mock_and_fixture(self, testdir):
pytest.importorskip("unittest.mock")
testdir.makepyfile(
"""
import os.path
import unittest.mock
import pytest
@pytest.fixture
def inject_me():
pass
@unittest.mock.patch.object(os.path, "abspath",
new=unittest.mock.MagicMock)
def test_hello(inject_me):
import os
os.path.abspath("hello")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_unittest_mock_and_pypi_mock(self, testdir):
pytest.importorskip("unittest.mock")
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile(
"""
import mock
import unittest.mock
class TestBoth(object):
@unittest.mock.patch("os.path.abspath")
def test_hello(self, abspath):
import os
os.path.abspath("hello")
abspath.assert_any_call("hello")
@mock.patch("os.path.abspath")
def test_hello_mock(self, abspath):
import os
os.path.abspath("hello")
abspath.assert_any_call("hello")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_mock(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile(
"""
import os
import unittest
import mock
class T(unittest.TestCase):
@mock.patch("os.path.abspath")
def test_hello(self, abspath):
os.path.abspath("hello")
abspath.assert_any_call("hello")
def mock_basename(path):
return "mock_basename"
@mock.patch("os.path.abspath")
@mock.patch("os.path.normpath")
@mock.patch("os.path.basename", new=mock_basename)
            def test_something(normpath, abspath, tmpdir):
abspath.return_value = "this"
os.path.normpath(os.path.abspath("hello"))
normpath.assert_any_call("this")
assert os.path.basename("123") == "mock_basename"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
calls = reprec.getcalls("pytest_runtest_logreport")
funcnames = [
call.report.location[2] for call in calls if call.report.when == "call"
]
        assert funcnames == ["T.test_hello", "test_something"]
def test_mock_sorting(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile(
"""
import os
import mock
@mock.patch("os.path.abspath")
def test_one(abspath):
pass
@mock.patch("os.path.abspath")
def test_two(abspath):
pass
@mock.patch("os.path.abspath")
def test_three(abspath):
pass
"""
)
reprec = testdir.inline_run()
calls = reprec.getreports("pytest_runtest_logreport")
calls = [x for x in calls if x.when == "call"]
names = [x.nodeid.split("::")[-1] for x in calls]
assert names == ["test_one", "test_two", "test_three"]
def test_mock_double_patch_issue473(self, testdir):
pytest.importorskip("mock", "1.0.1")
testdir.makepyfile(
"""
from mock import patch
from pytest import mark
@patch('os.getcwd')
@patch('os.path')
@mark.slow
class TestSimple(object):
def test_simple_thing(self, mock_path, mock_getcwd):
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
class TestReRunTests(object):
def test_rerun(self, testdir):
testdir.makeconftest(
"""
from _pytest.runner import runtestprotocol
def pytest_runtest_protocol(item, nextitem):
runtestprotocol(item, log=False, nextitem=nextitem)
runtestprotocol(item, log=True, nextitem=nextitem)
"""
)
testdir.makepyfile(
"""
import pytest
count = 0
req = None
@pytest.fixture
def fix(request):
global count, req
assert request != req
req = request
print ("fix count %s" % count)
count += 1
def test_fix(fix):
pass
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(
"""
*fix count 0*
*fix count 1*
"""
)
result.stdout.fnmatch_lines(
"""
*2 passed*
"""
)
def test_pytestconfig_is_session_scoped():
from _pytest.fixtures import pytestconfig
assert pytestconfig._pytestfixturefunction.scope == "session"
class TestNoselikeTestAttribute(object):
def test_module_with_global_test(self, testdir):
testdir.makepyfile(
"""
__test__ = False
def test_hello():
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
def test_class_and_method(self, testdir):
testdir.makepyfile(
"""
__test__ = True
def test_func():
pass
test_func.__test__ = False
class TestSome(object):
__test__ = False
def test_method(self):
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
calls = reprec.getreports("pytest_runtest_logreport")
assert not calls
def test_unittest_class(self, testdir):
testdir.makepyfile(
"""
import unittest
class TC(unittest.TestCase):
def test_1(self):
pass
class TC2(unittest.TestCase):
__test__ = False
def test_2(self):
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
call = reprec.getcalls("pytest_collection_modifyitems")[0]
assert len(call.items) == 1
assert call.items[0].cls.__name__ == "TC"
def test_class_with_nasty_getattr(self, testdir):
"""Make sure we handle classes with a custom nasty __getattr__ right.
With a custom __getattr__ which e.g. returns a function (like with a
RPC wrapper), we shouldn't assume this meant "__test__ = True".
"""
# https://github.com/pytest-dev/pytest/issues/1204
testdir.makepyfile(
"""
class MetaModel(type):
def __getattr__(cls, key):
return lambda: None
BaseModel = MetaModel('Model', (), {})
class Model(BaseModel):
__metaclass__ = MetaModel
def test_blah(self):
pass
"""
)
reprec = testdir.inline_run()
assert not reprec.getfailedcollections()
call = reprec.getcalls("pytest_collection_modifyitems")[0]
assert not call.items
@pytest.mark.issue351
class TestParameterize(object):
def test_idfn_marker(self, testdir):
testdir.makepyfile(
"""
import pytest
def idfn(param):
if param == 0:
return 'spam'
elif param == 1:
return 'ham'
else:
return None
@pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn)
def test_params(a, b):
pass
"""
)
res = testdir.runpytest("--collect-only")
res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"])
def test_idfn_fixture(self, testdir):
testdir.makepyfile(
"""
import pytest
def idfn(param):
if param == 0:
return 'spam'
elif param == 1:
return 'ham'
else:
return None
@pytest.fixture(params=[0, 1], ids=idfn)
def a(request):
return request.param
@pytest.fixture(params=[1, 2], ids=idfn)
def b(request):
return request.param
def test_params(a, b):
pass
"""
)
res = testdir.runpytest("--collect-only")
res.stdout.fnmatch_lines(["*spam-2*", "*ham-2*"])
| mpl-2.0 |
colinligertwood/odoo | addons/hr_gamification/__openerp__.py | 62 | 1622 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'depends': ['gamification', 'hr'],
'description': """Use the HR ressources for the gamification process.
The HR officer can now manage challenges and badges.
This allow the user to send badges to employees instead of simple users.
Badge received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
| agpl-3.0 |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/cbuildbot/stages/stage_results_unittest.py | 1 | 17477 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the stage results."""
from __future__ import print_function
import os
import signal
import StringIO
import sys
import time
sys.path.insert(0, os.path.abspath('%s/../../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import cbuildbot_config as config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import results_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import parallel
from chromite.scripts import cbuildbot
# TODO(build): Finish test wrapper (http://crosbug.com/37517).
# Until then, this has to be after the chromite imports.
import mock
class PassStage(generic_stages.BuilderStage):
"""PassStage always works"""
class Pass2Stage(generic_stages.BuilderStage):
"""Pass2Stage always works"""
class FailStage(generic_stages.BuilderStage):
"""FailStage always throws an exception"""
FAIL_EXCEPTION = failures_lib.StepFailure("Fail stage needs to fail.")
def PerformStage(self):
"""Throw the exception to make us fail."""
raise self.FAIL_EXCEPTION
class SkipStage(generic_stages.BuilderStage):
"""SkipStage is skipped."""
config_name = 'signer_tests'
class SneakyFailStage(generic_stages.BuilderStage):
"""SneakyFailStage exits with an error."""
def PerformStage(self):
"""Exit without reporting back."""
# pylint: disable=protected-access
os._exit(1)
class SuicideStage(generic_stages.BuilderStage):
"""SuicideStage kills itself with kill -9."""
def PerformStage(self):
"""Exit without reporting back."""
os.kill(os.getpid(), signal.SIGKILL)
class SetAttrStage(generic_stages.BuilderStage):
"""Stage that sets requested run attribute to a value."""
DEFAULT_ATTR = 'unittest_value'
VALUE = 'HereTakeThis'
def __init__(self, builder_run, delay=2, attr=DEFAULT_ATTR, *args, **kwargs):
super(SetAttrStage, self).__init__(builder_run, *args, **kwargs)
self.delay = delay
self.attr = attr
def PerformStage(self):
"""Wait self.delay seconds then set requested run attribute."""
time.sleep(self.delay)
self._run.attrs.SetParallel(self.attr, self.VALUE)
def QueueableException(self):
return cbuildbot_run.ParallelAttributeError(self.attr)
class GetAttrStage(generic_stages.BuilderStage):
"""Stage that accesses requested run attribute and confirms value."""
DEFAULT_ATTR = 'unittest_value'
def __init__(self, builder_run, tester=None, timeout=5, attr=DEFAULT_ATTR,
*args, **kwargs):
super(GetAttrStage, self).__init__(builder_run, *args, **kwargs)
self.tester = tester
self.timeout = timeout
self.attr = attr
def PerformStage(self):
"""Wait for attrs.test value to show up."""
assert not self._run.attrs.HasParallel(self.attr)
value = self._run.attrs.GetParallel(self.attr, self.timeout)
if self.tester:
self.tester(value)
def QueueableException(self):
return cbuildbot_run.ParallelAttributeError(self.attr)
def TimeoutException(self):
return cbuildbot_run.AttrTimeoutError(self.attr)
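# SetAttrStage and GetAttrStage together exercise BuilderRun's parallel
# run-attribute channel: one stage publishes a value via attrs.SetParallel()
# while the others block in attrs.GetParallel() until the value arrives or
# the timeout expires.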
class BuildStagesResultsTest(cros_test_lib.TestCase):
"""Tests for stage results and reporting."""
def setUp(self):
    # Always stub RunCommand out as we use it in every method.
self._bot_id = 'x86-generic-paladin'
build_config = config.config[self._bot_id]
self.build_root = '/fake_root'
    # Create a class to hold option values.
class Options(object):
"""Dummy class to hold option values."""
options = Options()
options.archive_base = 'gs://dontcare'
options.buildroot = self.build_root
options.debug = False
options.prebuilts = False
options.clobber = False
options.nosdk = False
options.remote_trybot = False
options.latest_toolchain = False
options.buildnumber = 1234
options.chrome_rev = None
options.branch = 'dontcare'
options.chrome_root = False
self._manager = parallel.Manager()
self._manager.__enter__()
self._run = cbuildbot_run.BuilderRun(options, build_config, self._manager)
results_lib.Results.Clear()
def tearDown(self):
# Mimic exiting with statement for self._manager.
self._manager.__exit__(None, None, None)
def _runStages(self):
"""Run a couple of stages so we can capture the results"""
# Run two pass stages, and one fail stage.
PassStage(self._run).Run()
Pass2Stage(self._run).Run()
self.assertRaises(
failures_lib.StepFailure,
FailStage(self._run).Run)
def _verifyRunResults(self, expectedResults, max_time=2.0):
actualResults = results_lib.Results.Get()
# Break out the asserts to be per item to make debugging easier
self.assertEqual(len(expectedResults), len(actualResults))
for i in xrange(len(expectedResults)):
entry = actualResults[i]
xname, xresult = expectedResults[i]
if entry.result not in results_lib.Results.NON_FAILURE_TYPES:
self.assertTrue(isinstance(entry.result, BaseException))
if isinstance(entry.result, failures_lib.StepFailure):
self.assertEqual(str(entry.result), entry.description)
self.assertTrue(entry.time >= 0 and entry.time < max_time)
self.assertEqual(xname, entry.name)
self.assertEqual(type(xresult), type(entry.result))
self.assertEqual(repr(xresult), repr(entry.result))
def _PassString(self):
record = results_lib.Result('Pass', results_lib.Results.SUCCESS, 'None',
'Pass', '', '0')
return results_lib.Results.SPLIT_TOKEN.join(record) + '\n'
def testRunStages(self):
"""Run some stages and verify the captured results"""
self.assertEqual(results_lib.Results.Get(), [])
self._runStages()
# Verify that the results are what we expect.
expectedResults = [
('Pass', results_lib.Results.SUCCESS),
('Pass2', results_lib.Results.SUCCESS),
('Fail', FailStage.FAIL_EXCEPTION),
]
self._verifyRunResults(expectedResults)
def testSuccessTest(self):
"""Run some stages and verify the captured results"""
results_lib.Results.Record('Pass', results_lib.Results.SUCCESS)
self.assertTrue(results_lib.Results.BuildSucceededSoFar())
results_lib.Results.Record('Fail', FailStage.FAIL_EXCEPTION, time=1)
self.assertFalse(results_lib.Results.BuildSucceededSoFar())
results_lib.Results.Record('Pass2', results_lib.Results.SUCCESS)
self.assertFalse(results_lib.Results.BuildSucceededSoFar())
def _TestParallelStages(self, stage_objs):
builder = cbuildbot.SimpleBuilder(self._run)
error = None
# pylint: disable=protected-access
with mock.patch.multiple(parallel._BackgroundTask, PRINT_INTERVAL=0.01):
try:
builder._RunParallelStages(stage_objs)
except parallel.BackgroundFailure as ex:
error = ex
return error
def testParallelStages(self):
stage_objs = [stage(self._run) for stage in
(PassStage, SneakyFailStage, FailStage, SuicideStage,
Pass2Stage)]
error = self._TestParallelStages(stage_objs)
self.assertTrue(error)
expectedResults = [
('Pass', results_lib.Results.SUCCESS),
('Fail', FailStage.FAIL_EXCEPTION),
('Pass2', results_lib.Results.SUCCESS),
('SneakyFail', error),
('Suicide', error),
]
self._verifyRunResults(expectedResults)
def testParallelStageCommunicationOK(self):
"""Test run attr communication betweeen parallel stages."""
def assert_test(value):
self.assertEqual(value, SetAttrStage.VALUE,
'Expected value %r to be passed between stages, but'
' got %r.' % (SetAttrStage.VALUE, value))
stage_objs = [
SetAttrStage(self._run),
GetAttrStage(self._run, assert_test, timeout=30),
GetAttrStage(self._run, assert_test, timeout=30),
]
error = self._TestParallelStages(stage_objs)
self.assertFalse(error)
expectedResults = [
('SetAttr', results_lib.Results.SUCCESS),
('GetAttr', results_lib.Results.SUCCESS),
('GetAttr', results_lib.Results.SUCCESS),
]
self._verifyRunResults(expectedResults, max_time=30.0)
# Make sure run attribute propagated up to the top, too.
value = self._run.attrs.GetParallel('unittest_value')
self.assertEqual(SetAttrStage.VALUE, value)
def testParallelStageCommunicationTimeout(self):
"""Test run attr communication between parallel stages that times out."""
def assert_test(value):
self.assertEqual(value, SetAttrStage.VALUE,
'Expected value %r to be passed between stages, but'
' got %r.' % (SetAttrStage.VALUE, value))
stage_objs = [SetAttrStage(self._run, delay=11),
GetAttrStage(self._run, assert_test, timeout=1),
]
error = self._TestParallelStages(stage_objs)
self.assertTrue(error)
expectedResults = [
('SetAttr', results_lib.Results.SUCCESS),
('GetAttr', stage_objs[1].TimeoutException()),
]
self._verifyRunResults(expectedResults, max_time=12.0)
def testParallelStageCommunicationNotQueueable(self):
"""Test setting non-queueable run attr in parallel stage."""
stage_objs = [SetAttrStage(self._run, attr='release_tag'),
GetAttrStage(self._run, timeout=2),
]
error = self._TestParallelStages(stage_objs)
self.assertTrue(error)
expectedResults = [
('SetAttr', stage_objs[0].QueueableException()),
('GetAttr', stage_objs[1].TimeoutException()),
]
self._verifyRunResults(expectedResults, max_time=12.0)
def testStagesReportSuccess(self):
"""Tests Stage reporting."""
sync_stages.ManifestVersionedSyncStage.manifest_manager = None
# Store off a known set of results and generate a report
results_lib.Results.Record('Sync', results_lib.Results.SUCCESS, time=1)
results_lib.Results.Record('Build', results_lib.Results.SUCCESS, time=2)
results_lib.Results.Record('Test', FailStage.FAIL_EXCEPTION, time=3)
results_lib.Results.Record('SignerTests', results_lib.Results.SKIPPED)
result = cros_build_lib.CommandResult(cmd=['/bin/false', '/nosuchdir'],
returncode=2)
results_lib.Results.Record(
'Archive',
cros_build_lib.RunCommandError(
'Command "/bin/false /nosuchdir" failed.\n',
result), time=4)
results = StringIO.StringIO()
results_lib.Results.Report(results)
expectedResults = (
"************************************************************\n"
"** Stage Results\n"
"************************************************************\n"
"** PASS Sync (0:00:01)\n"
"************************************************************\n"
"** PASS Build (0:00:02)\n"
"************************************************************\n"
"** FAIL Test (0:00:03) with StepFailure\n"
"************************************************************\n"
"** FAIL Archive (0:00:04) in /bin/false\n"
"************************************************************\n"
)
expectedLines = expectedResults.split('\n')
actualLines = results.getvalue().split('\n')
# Break out the asserts to be per item to make debugging easier
for i in xrange(min(len(actualLines), len(expectedLines))):
self.assertEqual(expectedLines[i], actualLines[i])
self.assertEqual(len(expectedLines), len(actualLines))
def testStagesReportError(self):
"""Tests Stage reporting with exceptions."""
sync_stages.ManifestVersionedSyncStage.manifest_manager = None
# Store off a known set of results and generate a report
results_lib.Results.Record('Sync', results_lib.Results.SUCCESS, time=1)
results_lib.Results.Record('Build', results_lib.Results.SUCCESS, time=2)
results_lib.Results.Record('Test', FailStage.FAIL_EXCEPTION,
'failException Msg\nLine 2', time=3)
result = cros_build_lib.CommandResult(cmd=['/bin/false', '/nosuchdir'],
returncode=2)
results_lib.Results.Record(
'Archive',
cros_build_lib.RunCommandError(
'Command "/bin/false /nosuchdir" failed.\n',
result),
'FailRunCommand msg', time=4)
results = StringIO.StringIO()
results_lib.Results.Report(results)
expectedResults = (
"************************************************************\n"
"** Stage Results\n"
"************************************************************\n"
"** PASS Sync (0:00:01)\n"
"************************************************************\n"
"** PASS Build (0:00:02)\n"
"************************************************************\n"
"** FAIL Test (0:00:03) with StepFailure\n"
"************************************************************\n"
"** FAIL Archive (0:00:04) in /bin/false\n"
"************************************************************\n"
"\n"
"Failed in stage Test:\n"
"\n"
"failException Msg\n"
"Line 2\n"
"\n"
"Failed in stage Archive:\n"
"\n"
"FailRunCommand msg\n"
)
expectedLines = expectedResults.split('\n')
actualLines = results.getvalue().split('\n')
# Break out the asserts to be per item to make debugging easier
for i in xrange(min(len(actualLines), len(expectedLines))):
self.assertEqual(expectedLines[i], actualLines[i])
self.assertEqual(len(expectedLines), len(actualLines))
def testStagesReportReleaseTag(self):
"""Tests Release Tag entry in stages report."""
current_version = "release_tag_string"
archive_urls = {
'board1': 'http://foo.com/bucket/bot-id1/version/index.html',
'board2': 'http://foo.com/bucket/bot-id2/version/index.html',}
# Store off a known set of results and generate a report
results_lib.Results.Record('Pass', results_lib.Results.SUCCESS, time=1)
results = StringIO.StringIO()
results_lib.Results.Report(results, archive_urls, current_version)
expectedResults = (
"************************************************************\n"
"** RELEASE VERSION: release_tag_string\n"
"************************************************************\n"
"** Stage Results\n"
"************************************************************\n"
"** PASS Pass (0:00:01)\n"
"************************************************************\n"
"** BUILD ARTIFACTS FOR THIS BUILD CAN BE FOUND AT:\n"
"** board1: %s\n"
"@@@STEP_LINK@Artifacts[board1]: bot-id1/version@%s@@@\n"
"** board2: %s\n"
"@@@STEP_LINK@Artifacts[board2]: bot-id2/version@%s@@@\n"
"************************************************************\n"
% (archive_urls['board1'], archive_urls['board1'],
archive_urls['board2'], archive_urls['board2']))
expectedLines = expectedResults.split('\n')
actualLines = results.getvalue().split('\n')
# Break out the asserts to be per item to make debugging easier
for i in xrange(len(expectedLines)):
self.assertEqual(expectedLines[i], actualLines[i])
self.assertEqual(len(expectedLines), len(actualLines))
def testSaveCompletedStages(self):
"""Tests that we can save out completed stages."""
# Run this again to make sure we have the expected results stored
results_lib.Results.Record('Pass', results_lib.Results.SUCCESS)
results_lib.Results.Record('Fail', FailStage.FAIL_EXCEPTION)
results_lib.Results.Record('Pass2', results_lib.Results.SUCCESS)
saveFile = StringIO.StringIO()
results_lib.Results.SaveCompletedStages(saveFile)
self.assertEqual(saveFile.getvalue(), self._PassString())
def testRestoreCompletedStages(self):
"""Tests that we can read in completed stages."""
results_lib.Results.RestoreCompletedStages(
StringIO.StringIO(self._PassString()))
previous = results_lib.Results.GetPrevious()
self.assertEqual(previous.keys(), ['Pass'])
def testRunAfterRestore(self):
"""Tests that we skip previously completed stages."""
# Fake results_lib.Results.RestoreCompletedStages
results_lib.Results.RestoreCompletedStages(
StringIO.StringIO(self._PassString()))
self._runStages()
# Verify that the results are what we expect.
expectedResults = [
('Pass', results_lib.Results.SUCCESS),
('Pass2', results_lib.Results.SUCCESS),
('Fail', FailStage.FAIL_EXCEPTION),
]
self._verifyRunResults(expectedResults)
def testFailedButForgiven(self):
"""Tests that warnings are flagged as such."""
results_lib.Results.Record('Warn', results_lib.Results.FORGIVEN, time=1)
results = StringIO.StringIO()
results_lib.Results.Report(results)
self.assertTrue('@@@STEP_WARNINGS@@@' in results.getvalue())
if __name__ == '__main__':
cros_test_lib.main()
| bsd-3-clause |
USStateDept/FPA_Core | openspending/command/search.py | 2 | 2007 | from openspending.command.util import create_submanager
from openspending.command.util import CommandException
from flask import current_app
import flask_whooshalchemy as whoo
from openspending.command.geometry import create as createCountries
manager = create_submanager(description='Search index operations')
@manager.command
def reindex():
""" Grant admin privileges to given user """
from openspending.core import db
from openspending.model import Dataset
from openspending.model.country import Country
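    # whoosh_index() returns the Whoosh index that flask_whooshalchemy
    # maintains for the model; the writer context manager commits every
    # queued update_document() call when the block exits.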
index = whoo.whoosh_index(current_app, Dataset)
with index.writer() as writer:
for dataset in Dataset.all():
primary_field = dataset.pure_whoosh.primary_key_name
searchable = dataset.__searchable__
attrs = {}
for key in searchable:
try:
attrs[key] = unicode(getattr(dataset, key))
except AttributeError:
                    raise AttributeError('{0} does not have {1} field {2}'
                                         .format("Dataset", searchable, key))
attrs[primary_field] = unicode(getattr(dataset, primary_field))
writer.update_document(**attrs)
    # make sure we have all of the geometry tables in there
createCountries(silent=True)
index = whoo.whoosh_index(current_app, Country)
with index.writer() as writer:
for country in Country.all():
primary_field = country.pure_whoosh.primary_key_name
searchable = country.__searchable__
attrs = {}
for key in searchable:
try:
attrs[key] = unicode(getattr(country, key))
except AttributeError:
                    raise AttributeError('{0} does not have {1} field {2}'
                                         .format("Country", searchable, key))
attrs[primary_field] = unicode(getattr(country, primary_field))
writer.update_document(**attrs)
| agpl-3.0 |
Chilledheart/chromium | third_party/closure_linter/closure_linter/closurizednamespacesinfo.py | 107 | 19655 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
import re
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
"""Initializes an instance the ClosurizedNamespacesInfo class.
Args:
closurized_namespaces: A list of namespace prefixes that should be
processed for dependency information. Non-matching namespaces are
ignored.
ignored_extra_namespaces: A list of namespaces that should not be reported
as extra regardless of whether they are actually used.
"""
self._closurized_namespaces = closurized_namespaces
self._ignored_extra_namespaces = (ignored_extra_namespaces +
DEFAULT_EXTRA_NAMESPACES)
self.Reset()
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file, the second is the identifier itself and the third is
# the line number where it's created.
self._created_namespaces = []
# A list of tuples where the first element is the namespace of an identifier
# used in the file, the second is the identifier itself and the third is the
# line number where it's used.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
# TODO(user): Handle the case where there are 2 different requires
# that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
"""Returns the namespaces which are already provided by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.provide statement in the file being checked.
"""
return set(self._provided_namespaces)
def GetRequiredNamespaces(self):
"""Returns the namespaces which are already required by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.require statement in the file being checked.
"""
return set(self._required_namespaces)
def IsExtraProvide(self, token):
"""Returns whether the given goog.provide token is unnecessary.
Args:
token: A goog.provide token.
Returns:
True if the given token corresponds to an unnecessary goog.provide
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
base_namespace = namespace.split('.', 1)[0]
if base_namespace not in self._closurized_namespaces:
return False
if token in self._duplicate_provide_tokens:
return True
# TODO(user): There's probably a faster way to compute this.
for created_namespace, created_identifier, _ in self._created_namespaces:
if namespace == created_namespace or namespace == created_identifier:
return False
return True
def IsExtraRequire(self, token):
"""Returns whether the given goog.require token is unnecessary.
Args:
token: A goog.require token.
Returns:
True if the given token corresponds to an unnecessary goog.require
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
base_namespace = namespace.split('.', 1)[0]
if base_namespace not in self._closurized_namespaces:
return False
if namespace in self._ignored_extra_namespaces:
return False
if token in self._duplicate_require_tokens:
return True
if namespace in self._suppressed_requires:
return False
# If the namespace contains a component that is initial caps, then that
# must be the last component of the namespace.
parts = namespace.split('.')
if len(parts) > 1 and parts[-2][0].isupper():
return True
# TODO(user): There's probably a faster way to compute this.
for used_namespace, used_identifier, _ in self._used_namespaces:
if namespace == used_namespace or namespace == used_identifier:
return False
return True
def GetMissingProvides(self):
"""Returns the dict of missing provided namespaces for the current file.
Returns:
      A dictionary mapping each namespace (string) that should be provided by
      this file but is not to the line number (integer) where it is first
      defined.
"""
missing_provides = dict()
for namespace, identifier, line_number in self._created_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in self._provided_namespaces and
identifier not in self._provided_namespaces and
namespace not in self._required_namespaces and
namespace not in missing_provides):
missing_provides[namespace] = line_number
return missing_provides
def GetMissingRequires(self):
"""Returns the dict of missing required namespaces for the current file.
For each non-private identifier used in the file, find either a
goog.require, goog.provide or a created identifier that satisfies it.
goog.require statements can satisfy the identifier by requiring either the
namespace of the identifier or the identifier itself. goog.provide
statements can satisfy the identifier by providing the namespace of the
identifier. A created identifier can only satisfy the used identifier if
it matches it exactly (necessary since things can be defined on a
namespace in more than one file). Note that provided namespaces should be
a subset of created namespaces, but we check both because in some cases we
can't always detect the creation of the namespace.
Returns:
      A dictionary mapping each namespace (string) that should be required by
      this file but is not to the line number (integer) where it is first
      used.
"""
external_dependencies = set(self._required_namespaces)
# Assume goog namespace is always available.
external_dependencies.add('goog')
created_identifiers = set()
for namespace, identifier, line_number in self._created_namespaces:
created_identifiers.add(identifier)
missing_requires = dict()
for namespace, identifier, line_number in self._used_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in external_dependencies and
namespace not in self._provided_namespaces and
identifier not in external_dependencies and
identifier not in created_identifiers and
namespace not in missing_requires):
missing_requires[namespace] = line_number
return missing_requires
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifer is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
"""Returns whether token is the first provide token."""
return self._provide_tokens and token == self._provide_tokens[0]
def IsFirstRequire(self, token):
"""Returns whether token is the first require token."""
return self._require_tokens and token == self._require_tokens[0]
def IsLastProvide(self, token):
"""Returns whether token is the last provide token."""
return self._provide_tokens and token == self._provide_tokens[-1]
def IsLastRequire(self, token):
"""Returns whether token is the last require token."""
return self._require_tokens and token == self._require_tokens[-1]
def ProcessToken(self, token, state_tracker):
"""Processes the given token for dependency information.
Args:
token: The token to process.
state_tracker: The JavaScript state tracker.
"""
# Note that this method is in the critical path for the linter and has been
# optimized for performance in the following ways:
# - Tokens are checked by type first to minimize the number of function
# calls necessary to determine if action needs to be taken for the token.
# - The most common tokens types are checked for first.
# - The number of function calls has been minimized (thus the length of this
    #   function).
if token.type == TokenType.IDENTIFIER:
# TODO(user): Consider saving the whole identifier in metadata.
whole_identifier_string = tokenutil.GetIdentifierForToken(token)
if whole_identifier_string is None:
# We only want to process the identifier one time. If the whole string
# identifier is None, that means this token was part of a multi-token
# identifier, but it was not the first token of the identifier.
return
# In the odd case that a goog.require is encountered inside a function,
# just ignore it (e.g. dynamic loading in test runners).
if token.string == 'goog.require' and not state_tracker.InFunction():
self._require_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._required_namespaces:
self._duplicate_require_tokens.append(token)
else:
self._required_namespaces.append(namespace)
# If there is a suppression for the require, add a usage for it so it
# gets treated as a regular goog.require (i.e. still gets sorted).
jsdoc = state_tracker.GetDocComment()
if jsdoc and ('extraRequire' in jsdoc.suppressions):
self._suppressed_requires.append(namespace)
self._AddUsedNamespace(state_tracker, namespace, token.line_number)
elif token.string == 'goog.provide':
self._provide_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._provided_namespaces:
self._duplicate_provide_tokens.append(token)
else:
self._provided_namespaces.append(namespace)
# If there is a suppression for the provide, add a creation for it so it
# gets treated as a regular goog.provide (i.e. still gets sorted).
jsdoc = state_tracker.GetDocComment()
if jsdoc and ('extraProvide' in jsdoc.suppressions):
self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
elif token.string == 'goog.scope':
self._scopified_file = True
elif token.string == 'goog.setTestOnly':
# Since the message is optional, we don't want to scan to later lines.
for t in tokenutil.GetAllTokensInSameLine(token):
if t.type == TokenType.STRING_TEXT:
message = t.string
if re.match(r'^\w+(\.\w+)+$', message):
# This looks like a namespace. If it's a Closurized namespace,
# consider it created.
base_namespace = message.split('.', 1)[0]
if base_namespace in self._closurized_namespaces:
self._AddCreatedNamespace(state_tracker, message,
token.line_number)
break
else:
jsdoc = state_tracker.GetDocComment()
if token.metadata and token.metadata.aliased_symbol:
whole_identifier_string = token.metadata.aliased_symbol
if jsdoc and jsdoc.HasFlag('typedef'):
self._AddCreatedNamespace(state_tracker, whole_identifier_string,
token.line_number,
namespace=self.GetClosurizedNamespace(
whole_identifier_string))
else:
if not (token.metadata and token.metadata.is_alias_definition):
self._AddUsedNamespace(state_tracker, whole_identifier_string,
token.line_number)
elif token.type == TokenType.SIMPLE_LVALUE:
identifier = token.values['identifier']
start_token = tokenutil.GetIdentifierStart(token)
if start_token and start_token != token:
# Multi-line identifier being assigned. Get the whole identifier.
identifier = tokenutil.GetIdentifierForToken(start_token)
else:
start_token = token
# If an alias is defined on the start_token, use it instead.
if (start_token and
start_token.metadata and
start_token.metadata.aliased_symbol and
not start_token.metadata.is_alias_definition):
identifier = start_token.metadata.aliased_symbol
if identifier:
namespace = self.GetClosurizedNamespace(identifier)
if state_tracker.InFunction():
self._AddUsedNamespace(state_tracker, identifier, token.line_number)
elif namespace and namespace != 'goog':
self._AddCreatedNamespace(state_tracker, identifier,
token.line_number, namespace=namespace)
elif token.type == TokenType.DOC_FLAG:
flag_type = token.attached_object.flag_type
is_interface = state_tracker.GetDocComment().HasFlag('interface')
if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
# Interfaces should be goog.require'd.
doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
interface = tokenutil.Search(doc_start, TokenType.COMMENT)
self._AddUsedNamespace(state_tracker, interface.string,
token.line_number)
def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
line_number: Line number where namespace is created.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingProvide' in jsdoc.suppressions:
return
self._created_namespaces.append([namespace, identifier, line_number])
def _AddUsedNamespace(self, state_tracker, identifier, line_number):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
line_number: Line number where namespace is used.
"""
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingRequire' in jsdoc.suppressions:
return
namespace = self.GetClosurizedNamespace(identifier)
    # b/5362203 If it's a variable in scope then it's not a required namespace.
if namespace and not state_tracker.IsVariableInScope(namespace):
self._used_namespaces.append([namespace, identifier, line_number])
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
last_part = parts[-1]
if not last_part:
# TODO(robbyw): Handle this: it's a multi-line identifier.
return None
# The namespace for a class is the shortest prefix ending in a class
# name, which starts with a capital letter but is not a capitalized word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
      # At this point, we know there's no class or enum, so the namespace is
      # just the identifier with the last part removed; apply, inherits, and
      # call should be stripped as well.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None
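# Illustrative inputs for GetClosurizedNamespace (hypothetical identifiers,
# assuming 'goog' is one of the closurized namespaces):
#   'goog.ui.Component.EventType' -> 'goog.ui.Component'  (stops at the
#       first capitalized part, i.e. the class name)
#   'goog.DEBUG'                  -> 'goog'  (an ALL-CAPS constant ends the walk)
#   'goog.dom.getElement'         -> 'goog.dom'  (no class; last part dropped)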
| bsd-3-clause |
xutian/virt-test | virttest/remote_build.py | 14 | 10451 | import os
import re
from autotest.client import utils
import remote
import aexpect
import data_dir
import hashlib
import logging
class BuildError(Exception):
def __init__(self, error_info):
super(BuildError, self).__init__(error_info)
self.error_info = error_info
def __str__(self):
e_msg = "Build Error: %s" % self.error_info
return e_msg
class Builder(object):
def __init__(self, params, address, source, shell_client=None,
shell_port=None, file_transfer_client=None,
file_transfer_port=None, username=None, password=None,
make_flags="", build_dir=None, build_dir_prefix=None,
shell_linesep=None, shell_prompt=None):
"""
:param params: Dictionary with test parameters, used to get the default
values of all named parameters.
:param address: Remote host or guest address
:param source: Directory containing the source on the machine
where this script is running
:param shell_client: The client to use ('ssh', 'telnet' or 'nc')
:param shell_port: Port to connect to for the shell client
:param file_transfer_client: The file transfer client to use ('scp' or
'rss')
:param file_transfer_port: Port to connect to for the file transfer
client
:param username: Username (if required)
:param password: Password (if required)
:param make_flags: Flags to pass to the make process, default: ""
:param build_dir: Where to copy and build the files on target. If None,
use params['tmp_dir']
:param build_dir_prefix: What to name the build directory on target
If None, use the name of the source directory.
:param shell_linesep: Line separator in the shell
:param shell_prompt: Regexp that matches the prompt in the shell.
"""
def full_build_path(build_dir, directory_prefix, make_flags):
"""
Generates the full path for the build using the make flags and
supplied build location.
:return: The full path as a string
"""
extra_flags_hash = hashlib.sha1()
extra_flags_hash.update(make_flags)
directory_name = "%s-%s" % (directory_prefix,
(extra_flags_hash.hexdigest())[:8])
return os.path.join(build_dir, directory_name)
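        # A small illustration of the scheme above (hypothetical values):
        # with build_dir='/tmp', directory_prefix='virt-test' and
        # make_flags='-j4', the result is '/tmp/virt-test-<hash>', where
        # <hash> is the first 8 hex digits of sha1('-j4'). Identical flags
        # thus reuse one build directory; different flags get their own.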
def def_helper(arg, param, default):
if arg is None:
return params.get(param, default)
else:
return arg
self.address = address
self.source = os.path.normpath(source)
self.client = def_helper(shell_client, "shell_client", "ssh")
self.port = def_helper(shell_port, "shell_port", "22")
self.file_transfer_client = def_helper(file_transfer_client,
"file_transfer_client", "scp")
self.file_transfer_port = def_helper(file_transfer_port,
"file_transfer_port", "22")
self.username = def_helper(username, "username", "root")
self.password = def_helper(password, "password", "redhat")
self.make_flags = make_flags
self.build_dir = def_helper(build_dir, "tmp_dir", "/tmp")
if build_dir_prefix is None:
build_dir_prefix = os.path.basename(source)
self.full_build_path = full_build_path(self.build_dir,
build_dir_prefix, make_flags)
self.linesep = def_helper(shell_linesep, "shell_linesep", "\n")
self.prompt = def_helper(shell_prompt, "shell_prompt",
"^\[.*\][\#\$]\s*)$")
self.session = remote.remote_login(self.client, self.address,
self.port, self.username,
self.password, self.prompt,
self.linesep, timeout=360)
def sync_directories(self):
"""
Synchronize the directories between the local and remote machines
:returns: True if any files needed to be copied; False otherwise. Does
not support symlinks.
"""
def get_local_hashes(path):
"""
Create a dict of the hashes of all files in path on the local
machine.
:param path: Path to search
"""
def hash_file(file_name):
"""
Calculate hex-encoded hash of a file
:param file_name: File to hash
"""
f = open(file_name, mode='rb')
h = hashlib.sha1()
while True:
buf = f.read(4096)
if not buf:
break
h.update(buf)
return h.hexdigest()
def visit(arg, dir_name, file_names):
"""
Callback function to calculate and store hashes
:param arg: Tuple with base path and the hash that will contain
the results.
:param dir_name: Current directory
:param file_names: File names in the current directory
"""
(base_path, result) = arg
for file_name in file_names:
path = os.path.join(dir_name, file_name)
if os.path.isfile(path):
result[os.path.relpath(path, base_path)] = hash_file(path)
result = {}
os.path.walk(path, visit, (path, result))
return result
def get_remote_hashes(path, session, linesep):
"""
Create a dict of the hashes of all files in path on the remote
machine.
:param path: Path to search
:param session: Session object to use
:param linesep: Line separation string for the remote system
"""
cmd = 'test \! -d %s || find %s -type f | xargs sha1sum' % (path,
path)
status, output = session.cmd_status_output(cmd)
if not status == 0:
raise BuildError("Unable to get hashes of remote files: '%s'"
% output)
result = {}
# Output is "<sum> <filename><linesep><sum> <filename>..."
for line in output.split(linesep):
if re.match("^[a-f0-9]{32,} [^ ].*$", line):
(h, f) = line.split(None, 1)
result[os.path.relpath(f, path)] = h
return result
def list_recursive_dirnames(path):
"""
List all directories that exist in path on the local machine
:param path: Path to search
"""
def visit(arg, dir_name, file_names):
"""
                Callback function to list all directories
:param arg: Tuple with base path and the list that will contain
the results.
:param dir_name: Current directory
:param file_names: File names in the current directory
"""
(base_path, result) = arg
for file_name in file_names:
path = os.path.join(dir_name, file_name)
if os.path.isdir(path):
result.append(os.path.relpath(path, base_path))
result = []
os.path.walk(path, visit, (path, result))
return result
remote_hashes = get_remote_hashes(self.full_build_path, self.session,
self.linesep)
local_hashes = get_local_hashes(self.source)
to_transfer = []
for rel_path in local_hashes.keys():
rhash = remote_hashes.get(rel_path)
if rhash is None or not rhash == local_hashes[rel_path]:
to_transfer.append(rel_path)
need_build = False
if to_transfer:
logging.info("Need to copy files to %s on target" %
self.full_build_path)
need_build = True
# Create all directories
dirs = list_recursive_dirnames(self.source)
if dirs:
dirs_text = " ".join(dirs)
fmt_arg = (self.full_build_path, self.full_build_path,
dirs_text)
cmd = 'mkdir -p %s && cd %s && mkdir -p %s' % fmt_arg
else:
cmd = 'mkdir -p %s' % self.full_build_path
status, output = self.session.cmd_status_output(cmd)
if not status == 0:
raise BuildError("Unable to create remote directories: '%s'"
% output)
# Copy files
for file_name in to_transfer:
local_path = os.path.join(self.source, file_name)
remote_path = os.path.join(self.full_build_path, file_name)
remote.copy_files_to(self.address, self.file_transfer_client,
self.username, self.password,
self.file_transfer_port, local_path,
remote_path)
else:
logging.info("Directory %s on target already up-to-date" %
self.full_build_path)
return need_build
def make(self):
"""
Execute make on the remote system
"""
logging.info("Building in %s on target" % self.full_build_path)
cmd = 'make -C %s %s' % (self.full_build_path, self.make_flags)
status, output = self.session.cmd_status_output(cmd)
if not status == 0:
raise BuildError("Unable to make: '%s'" % output)
def build(self):
"""
Synchronize all files and execute 'make' on the remote system if
needed.
:returns: The path to the build directory on the remote machine
"""
if self.sync_directories():
self.make()
return self.full_build_path
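# A minimal usage sketch for this class (hypothetical host and paths):
#
#   builder = Builder(params, '192.168.122.5', '/home/user/src/mytool')
#   remote_path = builder.build()
#
# build() copies only files whose sha1 differs on the target, and it runs
# make there solely when sync_directories() reported such a difference.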
| gpl-2.0 |
SummerZheng/iRun_YN | app/static/py/Vertex.py | 1 | 1496 | ###
# Class node
###
from math import sin, cos, sqrt, atan2, radians
R = 6373000
maxVal = 99999.9
class Vertex:
#cor is a tuple of (lon, lat)
def __init__(self, cor):
self.id = cor
self.connectedTo = {}
def addNeighbor(self, nbrID, dist=0, score=0):
self.connectedTo[nbrID] = [dist, score]
#print overload
def __str__(self):
s = str(self.id) + ' connectedTo: '
for x in self.connectedTo:
s += str(x) + ' d='+str(self.connectedTo[x][0])
s += ', s=' + str(self.connectedTo[x][1])+'; '
return s
def getConnections(self):
return self.connectedTo.keys()
def neighborNumber(self):
return len(self.connectedTo)
def getID(self):
return self.id
def getLon(self):
return self.id[0]
def getLat(self):
return self.id[1]
def getLength(self,nbrID):
return self.connectedTo[nbrID][0]
def getScore(self, nbrID):
return self.connectedTo[nbrID][1]
def dist2(self, nbr):
lon1 = radians(self.getLon())
lat1 = radians(self.getLat())
lon2 = radians(nbr.getLon())
lat2 = radians(nbr.getLat())
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
distance = R * c
return distance
'''
#Test
v = Vertex((-71.355, 42.400))
print v
'''
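'''
# Rough sanity check of dist2 (hypothetical coordinates): one degree of
# latitude spans roughly 111 km, so this should print about 111000-111250.
a = Vertex((0.0, 0.0))
b = Vertex((0.0, 1.0))
print a.dist2(b)
'''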
| mit |
rosmo/ansible | lib/ansible/modules/windows/win_disk_image.py | 52 | 2042 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
module: win_disk_image
short_description: Manage ISO/VHD/VHDX mounts on Windows hosts
version_added: '2.3'
description:
- Manages mount behavior for a specified ISO, VHD, or VHDX image on a Windows host. When C(state) is C(present),
the image will be mounted under a system-assigned drive letter, which will be returned in the C(mount_path) value
of the module result.
- Requires Windows 8+ or Windows Server 2012+.
options:
image_path:
description:
- Path to an ISO, VHD, or VHDX image on the target Windows host (the file cannot reside on a network share)
type: str
required: yes
state:
description:
- Whether the image should be present as a drive-letter mount or not.
type: str
choices: [ absent, present ]
default: present
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
# Run installer from mounted ISO, then unmount
- name: Ensure an ISO is mounted
win_disk_image:
image_path: C:\install.iso
state: present
register: disk_image_out
- name: Run installer from mounted ISO
win_package:
path: '{{ disk_image_out.mount_paths[0] }}setup\setup.exe'
product_id: 35a4e767-0161-46b0-979f-e61f282fee21
state: present
- name: Unmount ISO
win_disk_image:
image_path: C:\install.iso
state: absent
'''
RETURN = r'''
mount_path:
description: Filesystem path where the target image is mounted, this has been deprecated in favour of C(mount_paths).
returned: when C(state) is C(present)
type: str
sample: F:\
mount_paths:
description: A list of filesystem paths mounted from the target image.
returned: when C(state) is C(present)
type: list
sample: [ 'E:\', 'F:\' ]
'''
| gpl-3.0 |
stevereyes01/pycbc | tools/timing/match_perf.py | 10 | 3186 | #!/usr/bin/env python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.filter import *
from pycbc.psd import *
import pycbc
from math import log
import numpy
import numpy.random
import sys
from optparse import OptionParser
from math import sin
import gc
parser = OptionParser()
import logging
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--size', type=int, help='fft size in log2')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme()
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
size = options.size
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
N = 2**size
print(" SIZE ", int(log(N,2)))
n = N/2 +1
a = numpy.zeros(N) + 1000
noise = numpy.random.normal(a).astype(numpy.float32)
with ctx:
nplus2 = TimeSeries(noise,delta_t=1.0/4096,dtype=float32)
ntilde2 = make_frequency_series(nplus2)
psd2 = ntilde2.squared_norm()
o = match(ntilde2,ntilde2,psd=psd2)
o = match(ntilde2,ntilde2,psd=None, v1_norm=1, v2_norm=1)
o = matched_filter_core(ntilde2, ntilde2)
out=zeros(N,dtype=complex64)
o = overlap_cplx(ntilde2, ntilde2, normalized=False)
ntilde3 = ntilde2 +10j
def matcht():
with ctx:
for i in range(0,niter):
o,ind = match(ntilde2,ntilde2,psd=psd2)
def match_fast():
with ctx:
for i in range(0,niter):
o,ind = match(ntilde2,ntilde2,psd=None,v1_norm=1,v2_norm=1)
def ovlp():
with ctx:
for i in range(0,niter):
o = overlap_cplx(ntilde2,ntilde3, normalized=False)
def filter_fast():
with ctx:
for i in range(0,niter):
snr, corr, norm = matched_filter_core(ntilde2, ntilde2, psd=None, h_norm=1, out=out)
import timeit
gt = timeit.Timer(ovlp)
t = (1000 * gt.timeit(number=1)/niter)
print("Foverlap %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(matcht)
t = (1000 * gt.timeit(number=1)/niter)
print("MATCH %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(match_fast)
t = (1000 * gt.timeit(number=1)/niter)
print("MATCH FAST %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
gt = timeit.Timer(filter_fast)
t = (1000 * gt.timeit(number=1)/niter)
print("FILTER FAST %.2f msec" % t, " %5.1f op/min " % (1000 *60 /t))
| gpl-3.0 |
lbechberger/ConceptualSpaces | conceptual_spaces/test/correlation_analysis.py | 1 | 6052 | # -*- coding: utf-8 -*-
"""
Creates scatter plots comparing the results of different variants for computing conceptual betweenness.
Created on Thu Sep 5 20:35:37 2019
@author: lbechberger
"""
import sys
sys.path.append("..")
import cs.cs
import random
import matplotlib.pyplot as plt
from itertools import combinations
from scipy.stats import pearsonr, spearmanr
def random_cuboid(dimensions, domains, min_val, max_val):
p_min = []
p_max = []
for dim in dimensions:
p_min.append(random.uniform(0, ((min_val+max_val)/2) - 0.01))
p_max.append(random.uniform(((min_val+max_val)/2) + 0.01, max_val))
return cs.cuboid.Cuboid(p_min, p_max, domains)
def random_weights(domains):
dim_weights = {}
dom_weights = {}
for dom, dims in domains.items():
dom_weights[dom] = random.uniform(0.01, 1.0)
local_dim_weights = {}
for dim in dims:
local_dim_weights[dim] = random.uniform(0.01, 1.0)
dim_weights[dom] = local_dim_weights
return cs.weights.Weights(dom_weights, dim_weights)
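# Sketch of the structures built above (hypothetical domain layout): for
# domains = {0: [0, 1], 1: [2]}, random_weights returns a Weights object
# constructed from
#   dom_weights = {0: w0, 1: w1}
#   dim_weights = {0: {0: w00, 1: w01}, 1: {2: w12}}
# where every w is drawn uniformly from (0.01, 1.0).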
def scatter(n_dims, cuboids_per_concept, params, num_samples, max_dim_per_domain, operation):
"""Creates scatter plots for the betweenness values returned by different combinations of alphas and methods.
Parameters:
n_dims: number of dimensions
cuboids_per_concept: number of cuboids per concept
params: a dictionary mapping from configuration names to a dictionary of named parameters for the operation
num_samples: number of samples to draw
max_dim_per_domain: maximal number of dimensions per domain
operation: operation to evaluate"""
dimensions = list(range(n_dims))
random.seed(42)
results = {}
for key, value in params.items():
results[key] = []
counter = 0
fails = 0
while counter < num_samples:
# create a random domain structure
domains = {}
dimensions_left = dimensions
j = 0
while len(dimensions_left) > 0:
num_dims = random.randint(1, min(len(dimensions_left), max_dim_per_domain))
dims = random.sample(dimensions_left, num_dims)
domains[j] = list(dims)
dimensions_left = [dim for dim in dimensions_left if dim not in dims]
j += 1
# make the conceptual space
cs.cs.init(n_dims, domains)
# create three concepts with random identical weights, random cuboids, maximal mu and random c
w = random_weights(domains)
c1_list = []
c2_list = []
c3_list = []
for i in range(cuboids_per_concept):
c1_list.append(random_cuboid(dimensions, domains, 0.0, 1.0))
c2_list.append(random_cuboid(dimensions, domains, 0.0, 1.0))
c3_list.append(random_cuboid(dimensions, domains, 0.0, 1.0))
s1 = cs.core.from_cuboids(c1_list, domains)
s2 = cs.core.from_cuboids(c2_list, domains)
s3 = cs.core.from_cuboids(c3_list, domains)
f1 = cs.concept.Concept(s1, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w)
f2 = cs.concept.Concept(s2, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w)
f3 = cs.concept.Concept(s3, random.uniform(0.01, 1.0), random.uniform(1.0, 50.0), w)
local_res = {}
try:
for config_name, param_dict in params.items():
local_res[config_name] = operation(f1, f2, f3, param_dict)
except Exception:
fails += 1
continue
for key, res in local_res.items():
results[key].append(res)
counter += 1
if counter % 50 == 0:
print(("{0}/{1} ...".format(counter, fails)))
print(("ran {0} examples, failed {1} times".format(counter, fails)))
# all pairs of configurations
for first_config, second_config in combinations(list(results.keys()), 2):
# draw the plot
fig, ax = plt.subplots(figsize=(12,12))
ax.tick_params(axis="x", labelsize=16)
ax.tick_params(axis="y", labelsize=16)
ax.set_xlim(-0.01,1.01)
ax.set_ylim(-0.01,1.01)
ax.scatter(results[first_config], results[second_config])
plt.xlabel(first_config, fontsize = 20)
plt.ylabel(second_config, fontsize = 20)
plt.show()
# compute the correlations
pearson, _ = pearsonr(results[first_config], results[second_config])
spearman, _ = spearmanr(results[first_config], results[second_config])
print(('{0} - {1}: Pearson {2}, Spearman {3}'.format(first_config, second_config, pearson, spearman)))
####################################################################################################################################
# MAIN: here we select what to run at all
config_to_run = 'betweenness'
params = {}
params['similarity'] = {r'$Sim_S$': {'method': 'subset'},
r'$Sim_J$': {'method': 'Jaccard'}}
params['betweenness'] = {r'$B_{soft}^{min}$': {'method': 'minimum'},
r'$B_{soft}^{int}$ (20 $\alpha$-cuts)': {'method': 'integral', 'num_alpha_cuts': 20},
r'$B_{soft}^{int}$ (100 $\alpha$-cuts)': {'method': 'integral', 'num_alpha_cuts': 100}}
config = {}
config['similarity'] = {'number_of_samples': 1000, 'number_of_dimensions': 4, 'max_dim_per_dom': 4, 'number_of_cuboids_per_concept': 2}
config['betweenness'] = {'number_of_samples': 1000, 'number_of_dimensions': 4, 'max_dim_per_dom': 4, 'number_of_cuboids_per_concept': 2}
operations = {'similarity': lambda x,y,z,p: x.similarity_to(y,**p),
'betweenness': lambda x,y,z,p: x.between(y,z,**p)}
print((config_to_run, config[config_to_run]))
scatter(config[config_to_run]['number_of_dimensions'], config[config_to_run]['number_of_cuboids_per_concept'], params[config_to_run], config[config_to_run]['number_of_samples'], config[config_to_run]['max_dim_per_dom'], operations[config_to_run])
| mit |
nwokeo/supysonic | venv/lib/python2.7/site-packages/PIL/SunImagePlugin.py | 8 | 4318 | #
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile, ImagePalette
from ._binary import i32be as i32
__version__ = "0.3"
def _accept(prefix):
return len(prefix) >= 4 and i32(prefix) == 0x59a66a95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
format = "SUN"
format_description = "Sun Raster File"
def _open(self):
# The Sun Raster file header is 32 bytes in length
# and has the following format:
# typedef struct _SunRaster
# {
# DWORD MagicNumber; /* Magic (identification) number */
# DWORD Width; /* Width of image in pixels */
# DWORD Height; /* Height of image in pixels */
# DWORD Depth; /* Number of bits per pixel */
# DWORD Length; /* Size of image data in bytes */
# DWORD Type; /* Type of raster file */
# DWORD ColorMapType; /* Type of color map */
# DWORD ColorMapLength; /* Size of the color map in bytes */
# } SUNRASTER;
# HEAD
s = self.fp.read(32)
if i32(s) != 0x59a66a95:
raise SyntaxError("not an SUN raster file")
offset = 32
self.size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
data_length = i32(s[16:20]) # unreliable, ignore.
file_type = i32(s[20:24])
palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary
palette_length = i32(s[28:32])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 4:
self.mode, rawmode = "L", "L;4"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
if file_type == 3:
self.mode, rawmode = "RGB", "RGB"
else:
self.mode, rawmode = "RGB", "BGR"
elif depth == 32:
if file_type == 3:
self.mode, rawmode = 'RGB', 'RGBX'
else:
self.mode, rawmode = 'RGB', 'BGRX'
else:
raise SyntaxError("Unsupported Mode/Bit Depth")
if palette_length:
if palette_length > 1024:
raise SyntaxError("Unsupported Color Palette Length")
if palette_type != 1:
raise SyntaxError("Unsupported Palette Type")
offset = offset + palette_length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
if self.mode == "L":
self.mode = "P"
rawmode = rawmode.replace('L', 'P')
# 16 bit boundaries on stride
stride = ((self.size[0] * depth + 15) // 16) * 2
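        # Worked example of the computation above: a 5-pixel-wide, 8-bit
        # image needs 5 * 8 = 40 bits per row, and rounding up to the next
        # 16-bit boundary gives ((40 + 15) // 16) * 2 = 6 bytes per row.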
# file type: Type is the version (or flavor) of the bitmap
# file. The following values are typically found in the Type
# field:
# 0000h Old
# 0001h Standard
# 0002h Byte-encoded
# 0003h RGB format
# 0004h TIFF format
# 0005h IFF format
# FFFFh Experimental
# Old and standard are the same, except for the length tag.
# byte-encoded is run-length-encoded
# RGB looks similar to standard, but RGB byte order
# TIFF and IFF mean that they were converted from T/IFF
# Experimental means that it's something else.
# (https://www.fileformat.info/format/sunraster/egff.htm)
if file_type in (0, 1, 3, 4, 5):
self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))]
elif file_type == 2:
self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)]
else:
raise SyntaxError('Unsupported Sun Raster file type')
#
# registry
Image.register_open(SunImageFile.format, SunImageFile, _accept)
Image.register_extension(SunImageFile.format, ".ras")
| agpl-3.0 |
jbarcia/Empire | lib/modules/code_execution/invoke_shellcodemsil.py | 22 | 3011 | import re
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-ShellcodeMSIL',
'Author': ['@mattifestation'],
'Description': ('Execute shellcode within the context of the running PowerShell '
'process without making any Win32 function calls. Warning: This script has '
'no way to validate that your shellcode is 32 vs. 64-bit!'
'Note: Your shellcode must end in a ret (0xC3) and maintain proper stack '
'alignment or PowerShell will crash!'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'http://www.exploit-monday.com',
'https://github.com/mattifestation/PowerSploit/blob/master/CodeExecution/Invoke-ShellcodeMSIL.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Shellcode' : {
'Description' : 'Shellcode to inject, 0x00,0x0a,... format.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/code_execution/Invoke-ShellcodeMSIL.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-ShellcodeMSIL"
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if option.lower() == "shellcode":
# transform the shellcode to the correct format
sc = ",0".join(values['Value'].split("\\"))[1:]
script += " -" + str(option) + " @(" + sc + ")"
return script | bsd-3-clause |
mvidalgarcia/indico | indico/modules/events/contributions/forms.py | 2 | 11006 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import timedelta
from flask import request
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields import BooleanField, HiddenField, SelectField, StringField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from indico.core.db import db
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.abstracts.settings import BOASortField
from indico.modules.events.contributions.fields import (ContributionPersonLinkListField,
SubContributionPersonLinkListField)
from indico.modules.events.contributions.models.references import ContributionReference, SubContributionReference
from indico.modules.events.contributions.models.types import ContributionType
from indico.modules.events.fields import ReferencesField
from indico.modules.events.util import check_permissions
from indico.util.date_time import get_day_end
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import (HiddenFieldList, IndicoDateTimeField, IndicoEnumSelectField, IndicoLocationField,
IndicoProtectionField, IndicoTagListField, TimeDeltaField)
from indico.web.forms.fields.principals import PermissionsField
from indico.web.forms.validators import DateTimeRange, MaxDuration
from indico.web.forms.widgets import SwitchWidget
class ContributionForm(IndicoForm):
title = StringField(_("Title"), [DataRequired()])
description = TextAreaField(_("Description"))
start_dt = IndicoDateTimeField(_("Start date"),
[DataRequired(),
DateTimeRange(earliest=lambda form, field: form._get_earliest_start_dt(),
latest=lambda form, field: form._get_latest_start_dt())],
allow_clear=False,
description=_("Start date of the contribution"))
duration = TimeDeltaField(_("Duration"), [DataRequired(), MaxDuration(timedelta(hours=24))],
default=timedelta(minutes=20), units=('minutes', 'hours'))
type = QuerySelectField(_("Type"), get_label='name', allow_blank=True, blank_text=_("No type selected"))
person_link_data = ContributionPersonLinkListField(_("People"))
location_data = IndicoLocationField(_("Location"))
keywords = IndicoTagListField(_('Keywords'))
references = ReferencesField(_("External IDs"), reference_class=ContributionReference,
description=_("Manage external resources for this contribution"))
board_number = StringField(_("Board Number"))
code = StringField(_('Programme code'))
@generated_data
def render_mode(self):
return RenderMode.markdown
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.contrib = kwargs.pop('contrib', None)
self.session_block = kwargs.get('session_block')
self.timezone = self.event.timezone
to_schedule = kwargs.pop('to_schedule', False)
super(ContributionForm, self).__init__(*args, **kwargs)
self.type.query = self.event.contribution_types
if self.event.type != 'conference':
self.person_link_data.label.text = _("Speakers")
if not self.type.query.count():
del self.type
if not to_schedule and (self.contrib is None or not self.contrib.is_scheduled):
del self.start_dt
def _get_earliest_start_dt(self):
return self.session_block.start_dt if self.session_block else self.event.start_dt
def _get_latest_start_dt(self):
return self.session_block.end_dt if self.session_block else self.event.end_dt
def validate_duration(self, field):
start_dt = self.start_dt.data if self.start_dt else None
if start_dt:
end_dt = start_dt + field.data
if self.session_block and end_dt > self.session_block.end_dt:
raise ValidationError(_("With the current duration the contribution exceeds the block end date"))
if end_dt > self.event.end_dt:
raise ValidationError(_('With the current duration the contribution exceeds the event end date'))
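    # Illustration of the checks above (hypothetical times): a contribution
    # starting at 10:00 with a 30-minute duration has end_dt 10:30, so a
    # session block ending at 10:20 triggers the first ValidationError.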
@property
def custom_field_names(self):
return tuple([field_name for field_name in self._fields if field_name.startswith('custom_')])
class ContributionProtectionForm(IndicoForm):
permissions = PermissionsField(_("Permissions"), object_type='contribution')
protection_mode = IndicoProtectionField(_('Protection mode'), protected_object=lambda form: form.protected_object,
acl_message_url=lambda form: url_for('contributions.acl_message',
form.protected_object))
def __init__(self, *args, **kwargs):
self.protected_object = contribution = kwargs.pop('contrib')
self.event = contribution.event
super(ContributionProtectionForm, self).__init__(*args, **kwargs)
def validate_permissions(self, field):
check_permissions(self.event, field)
class SubContributionForm(IndicoForm):
title = StringField(_('Title'), [DataRequired()])
description = TextAreaField(_('Description'))
duration = TimeDeltaField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
default=timedelta(minutes=20), units=('minutes', 'hours'))
speakers = SubContributionPersonLinkListField(_('Speakers'), allow_submitters=False, allow_authors=False,
description=_('The speakers of the subcontribution'))
references = ReferencesField(_("External IDs"), reference_class=SubContributionReference,
description=_("Manage external resources for this sub-contribution"))
code = StringField(_('Programme code'))
@generated_data
def render_mode(self):
return RenderMode.markdown
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.subcontrib = kwargs.pop('subcontrib', None)
super(SubContributionForm, self).__init__(*args, **kwargs)
class ContributionStartDateForm(IndicoForm):
start_dt = IndicoDateTimeField(_('Start date'), [DataRequired(),
DateTimeRange(earliest=lambda form, field: form.event.start_dt,
latest=lambda form, field: form.event.end_dt)],
allow_clear=False)
def __init__(self, *args, **kwargs):
self.contrib = kwargs.pop('contrib')
self.event = self.contrib.event
self.timezone = self.event.timezone
super(ContributionStartDateForm, self).__init__(*args, **kwargs)
def validate_start_dt(self, field):
event = self.contrib.event
day = self.contrib.start_dt.astimezone(event.tzinfo).date()
if day == event.end_dt_local.date():
latest_dt = event.end_dt
error_msg = _("With this time, the contribution would exceed the event end time.")
else:
latest_dt = get_day_end(day, tzinfo=event.tzinfo)
error_msg = _("With this time, the contribution would exceed the current day.")
if field.data + self.contrib.duration > latest_dt:
raise ValidationError(error_msg)
class ContributionDurationForm(IndicoForm):
duration = TimeDeltaField(_('Duration'), [DataRequired(), MaxDuration(timedelta(days=1))],
default=timedelta(minutes=20), units=('minutes', 'hours'))
def __init__(self, *args, **kwargs):
self.contrib = kwargs.pop('contrib')
super(ContributionDurationForm, self).__init__(*args, **kwargs)
def validate_duration(self, field):
if field.errors:
return
if self.contrib.is_scheduled:
event = self.contrib.event
day = self.contrib.start_dt.astimezone(event.tzinfo).date()
if day == event.end_dt_local.date():
latest_dt = event.end_dt
error_msg = _("With this duration, the contribution would exceed the event end time.")
else:
latest_dt = get_day_end(day, tzinfo=event.tzinfo)
error_msg = _("With this duration, the contribution would exceed the current day.")
if self.contrib.start_dt + field.data > latest_dt:
raise ValidationError(error_msg)
class ContributionDefaultDurationForm(IndicoForm):
duration = TimeDeltaField(_('Duration'), [DataRequired(), MaxDuration(timedelta(days=1))],
units=('minutes', 'hours'))
class ContributionTypeForm(IndicoForm):
"""Form to create or edit a ContributionType"""
name = StringField(_("Name"), [DataRequired()])
is_private = BooleanField(_("Private"), widget=SwitchWidget(),
description=_("If selected, this contribution type cannot be chosen by users "
"submitting an abstract."))
description = TextAreaField(_("Description"))
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.contrib_type = kwargs.get('obj')
super(ContributionTypeForm, self).__init__(*args, **kwargs)
def validate_name(self, field):
query = self.event.contribution_types.filter(db.func.lower(ContributionType.name) == field.data.lower())
if self.contrib_type:
query = query.filter(ContributionType.id != self.contrib_type.id)
if query.count():
raise ValidationError(_("A contribution type with this name already exists"))
class ContributionExportTeXForm(IndicoForm):
"""Form for TeX-based export selection"""
format = SelectField(_('Format'), default='PDF')
sort_by = IndicoEnumSelectField(_('Sort by'), enum=BOASortField, default=BOASortField.abstract_title,
sorted=True)
contribution_ids = HiddenFieldList()
submitted = HiddenField()
def __init__(self, *args, **kwargs):
self.contribs = kwargs.get('contribs')
super(ContributionExportTeXForm, self).__init__(*args, **kwargs)
if not self.contribution_ids.data:
self.contribution_ids.data = [c.id for c in self.contribs]
def is_submitted(self):
return super(ContributionExportTeXForm, self).is_submitted() and 'submitted' in request.form
| mit |
RanadeepPolavarapu/kuma | vendor/packages/translate/storage/xliff.py | 22 | 31049 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module for handling XLIFF files for translation.
The official recommendation is to use the extension .xlf for XLIFF files.
"""
from lxml import etree
from translate.misc.multistring import multistring
from translate.storage import base, lisa
from translate.storage.lisa import getXMLspace
from translate.storage.placeables.lisa import strelem_to_xml, xml_to_strelem
from translate.storage.workflow import StateEnum as state
# TODO: handle translation types
ID_SEPARATOR = u"\04"
# ID_SEPARATOR is commonly used throughout the toolkit to generate compound
# unit ids (for instance to concatenate msgctxt and msgid in po), but
# \04 is an illegal char in XML 1.0, so ID_SEPARATOR_SAFE will be used
# instead when converting between xliff and other toolkit supported
# formats
ID_SEPARATOR_SAFE = u"__%04__"
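# Sketch of the sanitising round-trip implemented below (hypothetical id):
# setid(u"ctx\x04msg") stores id="ctx__%04__msg" in the XML attribute, and
# getid() maps it back to u"ctx\x04msg", prefixed with the ancestor file
# tag's 'original' name plus ID_SEPARATOR when such an ancestor exists.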
class xliffunit(lisa.LISAunit):
"""A single term in the xliff file."""
rootNode = "trans-unit"
languageNode = "source"
textNode = ""
namespace = 'urn:oasis:names:tc:xliff:document:1.1'
_default_xml_space = "default"
# TODO: id and all the trans-unit level stuff
S_UNTRANSLATED = state.EMPTY
S_NEEDS_TRANSLATION = state.NEEDS_WORK
S_NEEDS_REVIEW = state.NEEDS_REVIEW
S_TRANSLATED = state.UNREVIEWED
S_SIGNED_OFF = state.FINAL
statemap = {
"new": S_UNTRANSLATED + 1,
"needs-translation": S_NEEDS_TRANSLATION,
"needs-adaptation": S_NEEDS_TRANSLATION + 1,
"needs-l10n": S_NEEDS_TRANSLATION + 2,
"needs-review-translation": S_NEEDS_REVIEW,
"needs-review-adaptation": S_NEEDS_REVIEW + 1,
"needs-review-l10n": S_NEEDS_REVIEW + 2,
"translated": S_TRANSLATED,
"signed-off": S_SIGNED_OFF,
"final": S_SIGNED_OFF + 1,
}
statemap_r = dict((i[1], i[0]) for i in statemap.iteritems())
STATE = {
S_UNTRANSLATED: (state.EMPTY, state.NEEDS_WORK),
S_NEEDS_TRANSLATION: (state.NEEDS_WORK, state.NEEDS_REVIEW),
S_NEEDS_REVIEW: (state.NEEDS_REVIEW, state.UNREVIEWED),
S_TRANSLATED: (state.UNREVIEWED, state.FINAL),
S_SIGNED_OFF: (state.FINAL, state.MAX),
}
def __init__(self, source, empty=False, **kwargs):
"""Override the constructor to set xml:space="preserve"."""
super(xliffunit, self).__init__(source, empty, **kwargs)
if empty:
return
lisa.setXMLspace(self.xmlelement, "preserve")
def createlanguageNode(self, lang, text, purpose):
"""Returns an xml Element setup with given parameters."""
# TODO: for now we do source, but we have to test if it is target,
# perhaps with parameter. Alternatively, we can use lang, if
# supplied, since an xliff file has to conform to the bilingual
# nature promised by the header.
assert purpose
langset = etree.Element(self.namespaced(purpose))
# TODO: check language
#lisa.setXMLlang(langset, lang)
langset.text = text
return langset
def getlanguageNodes(self):
"""We override this to get source and target nodes."""
source = None
target = None
nodes = []
try:
source = self.xmlelement.iterchildren(self.namespaced(self.languageNode)).next()
target = self.xmlelement.iterchildren(self.namespaced('target')).next()
nodes = [source, target]
except StopIteration:
if source is not None:
nodes.append(source)
            if target is not None:
nodes.append(target)
return nodes
def set_rich_source(self, value, sourcelang='en'):
sourcelanguageNode = self.get_source_dom()
if sourcelanguageNode is None:
sourcelanguageNode = self.createlanguageNode(sourcelang, u'', "source")
self.set_source_dom(sourcelanguageNode)
# Clear sourcelanguageNode first
for i in range(len(sourcelanguageNode)):
del sourcelanguageNode[0]
sourcelanguageNode.text = None
strelem_to_xml(sourcelanguageNode, value[0])
def get_rich_source(self):
#rsrc = xml_to_strelem(self.source_dom)
#logging.debug('rich source: %s' % (repr(rsrc)))
#from dubulib.debug.misc import print_stack_funcs
#print_stack_funcs()
return [
xml_to_strelem(self.source_dom,
getXMLspace(self.xmlelement,
self._default_xml_space))
]
rich_source = property(get_rich_source, set_rich_source)
def set_rich_target(self, value, lang='xx', append=False):
self._rich_target = None
if value is None:
self.set_target_dom(self.createlanguageNode(lang, u'', "target"))
return
languageNode = self.get_target_dom()
if languageNode is None:
languageNode = self.createlanguageNode(lang, u'', "target")
self.set_target_dom(languageNode, append)
# Clear languageNode first
for i in range(len(languageNode)):
del languageNode[0]
languageNode.text = None
strelem_to_xml(languageNode, value[0])
### currently giving some issues in Virtaal: self._rich_target = value
def get_rich_target(self, lang=None):
"""retrieves the "target" text (second entry), or the entry in the
specified language, if it exists"""
if self._rich_target is None:
self._rich_target = [
xml_to_strelem(self.get_target_dom(lang),
getXMLspace(self.xmlelement, self._default_xml_space))
]
return self._rich_target
rich_target = property(get_rich_target, set_rich_target)
def addalttrans(self, txt, origin=None, lang=None, sourcetxt=None,
matchquality=None):
"""Adds an alt-trans tag and alt-trans components to the unit.
:type txt: String
:param txt: Alternative translation of the source text.
"""
        # TODO: support adding a source tag and match quality attribute. A
        # source tag is needed to inject fuzzy matches from a TM.
if isinstance(txt, str):
txt = txt.decode("utf-8")
alttrans = etree.SubElement(self.xmlelement, self.namespaced("alt-trans"))
lisa.setXMLspace(alttrans, "preserve")
if sourcetxt:
if isinstance(sourcetxt, str):
sourcetxt = sourcetxt.decode("utf-8")
altsource = etree.SubElement(alttrans, self.namespaced("source"))
altsource.text = sourcetxt
alttarget = etree.SubElement(alttrans, self.namespaced("target"))
alttarget.text = txt
if matchquality:
alttrans.set("match-quality", matchquality)
if origin:
alttrans.set("origin", origin)
if lang:
lisa.setXMLlang(alttrans, lang)
def getalttrans(self, origin=None):
"""Returns <alt-trans> for the given origin as a list of units. No
origin means all alternatives."""
translist = []
for node in self.xmlelement.iterdescendants(self.namespaced("alt-trans")):
if self.correctorigin(node, origin):
# We build some mini units that keep the xmlelement. This
# makes it easier to delete it if it is passed back to us.
newunit = base.TranslationUnit(self.source)
# the source tag is optional
sourcenode = node.iterdescendants(self.namespaced("source"))
try:
newunit.source = lisa.getText(sourcenode.next(),
getXMLspace(node, self._default_xml_space))
except StopIteration:
pass
# must have one or more targets
targetnode = node.iterdescendants(self.namespaced("target"))
newunit.target = lisa.getText(targetnode.next(),
getXMLspace(node, self._default_xml_space))
# TODO: support multiple targets better
# TODO: support notes in alt-trans
newunit.xmlelement = node
translist.append(newunit)
return translist
def delalttrans(self, alternative):
"""Removes the supplied alternative from the list of alt-trans tags"""
self.xmlelement.remove(alternative.xmlelement)
def addnote(self, text, origin=None, position="append"):
"""Add a note specifically in a "note" tag"""
if position != "append":
self.removenotes(origin=origin)
if text:
text = text.strip()
if not text:
return
if isinstance(text, str):
text = text.decode("utf-8")
note = etree.SubElement(self.xmlelement, self.namespaced("note"))
note.text = text
if origin:
note.set("from", origin)
def _getnotelist(self, origin=None):
"""Returns the text from notes matching ``origin`` or all notes.
:param origin: The origin of the note (or note type)
:type origin: String
:return: The text from notes matching ``origin``
:rtype: List
"""
note_nodes = self.xmlelement.iterdescendants(self.namespaced("note"))
# TODO: consider using xpath to construct initial_list directly
# or to simply get the correct text from the outset (just remember to
        # check for duplication).
initial_list = [lisa.getText(note, getXMLspace(self.xmlelement, self._default_xml_space)) for note in note_nodes if self.correctorigin(note, origin)]
# Remove duplicate entries from list:
dictset = {}
note_list = [dictset.setdefault(note, note) for note in initial_list if note not in dictset]
return note_list
def getnotes(self, origin=None):
return '\n'.join(self._getnotelist(origin=origin))
def removenotes(self, origin="translator"):
"""Remove all the translator notes."""
notes = self.xmlelement.iterdescendants(self.namespaced("note"))
for note in notes:
if self.correctorigin(note, origin=origin):
self.xmlelement.remove(note)
def adderror(self, errorname, errortext):
"""Adds an error message to this unit."""
# TODO: consider factoring out: some duplication between XLIFF and TMX
text = errorname
if errortext:
text += ': ' + errortext
self.addnote(text, origin="pofilter")
def geterrors(self):
"""Get all error messages."""
# TODO: consider factoring out: some duplication between XLIFF and TMX
notelist = self._getnotelist(origin="pofilter")
errordict = {}
for note in notelist:
errorname, errortext = note.split(': ')
errordict[errorname] = errortext
return errordict
def get_state_n(self):
targetnode = self.getlanguageNode(lang=None, index=1)
if targetnode is None:
if self.isapproved():
return self.S_UNREVIEWED
else:
return self.S_UNTRANSLATED
xmlstate = targetnode.get("state", None)
state_n = self.statemap.get(xmlstate, self.S_UNTRANSLATED)
if state_n < self.S_NEEDS_TRANSLATION and self.target:
state_n = self.S_NEEDS_TRANSLATION
if self.isapproved() and state_n < self.S_UNREVIEWED:
state_n = self.S_UNREVIEWED
if not self.isapproved() and state_n > self.S_UNREVIEWED:
state_n = self.S_UNREVIEWED
return state_n
def set_state_n(self, value):
if value not in self.statemap_r:
value = self.get_state_id(value)
targetnode = self.getlanguageNode(lang=None, index=1)
# FIXME: handle state qualifiers
if value == self.S_UNTRANSLATED:
if targetnode is not None and "state" in targetnode.attrib:
del targetnode.attrib["state"]
else:
if targetnode is not None:
xmlstate = self.statemap_r.get(value)
targetnode.set("state", xmlstate)
self.markapproved(value > self.S_NEEDS_REVIEW)
def isapproved(self):
"""States whether this unit is approved."""
return self.xmlelement.get("approved") == "yes"
def markapproved(self, value=True):
"""Mark this unit as approved."""
if value:
self.xmlelement.set("approved", "yes")
elif self.isapproved():
self.xmlelement.set("approved", "no")
def isreview(self):
"""States whether this unit needs to be reviewed"""
return self.get_state_id() == self.S_NEEDS_REVIEW
def markreviewneeded(self, needsreview=True, explanation=None):
"""Marks the unit to indicate whether it needs review.
Adds an optional explanation as a note."""
state_id = self.get_state_id()
if needsreview and state_id != self.S_NEEDS_REVIEW:
self.set_state_n(self.S_NEEDS_REVIEW)
if explanation:
self.addnote(explanation, origin="translator")
elif not needsreview and state_id < self.S_UNREVIEWED:
self.set_state_n(self.S_UNREVIEWED)
def isfuzzy(self):
# targetnode = self.getlanguageNode(lang=None, index=1)
# return not targetnode is None and \
# (targetnode.get("state-qualifier") == "fuzzy-match" or \
# targetnode.get("state") == "needs-review-translation")
return not self.isapproved() and bool(self.target)
def markfuzzy(self, value=True):
state_id = self.get_state_id()
if value:
self.markapproved(False)
if state_id != self.S_NEEDS_TRANSLATION:
self.set_state_n(self.S_NEEDS_TRANSLATION)
else:
self.markapproved(True)
if state_id < self.S_UNREVIEWED:
self.set_state_n(self.S_UNREVIEWED)
def settarget(self, text, lang='xx', append=False):
"""Sets the target string to the given value."""
super(xliffunit, self).settarget(text, lang, append)
if text:
self.marktranslated()
    # This code is commented out because it would almost always return False.
    # This way pocount, etc. works well.
# def istranslated(self):
# targetnode = self.getlanguageNode(lang=None, index=1)
# return not targetnode is None and \
# (targetnode.get("state") == "translated")
def istranslatable(self):
value = self.xmlelement.get("translate")
if value and value.lower() == 'no':
return False
return True
def marktranslated(self):
state_id = self.get_state_id()
if state_id < self.S_UNREVIEWED:
self.set_state_n(self.S_UNREVIEWED)
def setid(self, id):
        # sanitize id in case ID_SEPARATOR is present
self.xmlelement.set("id", id.replace(ID_SEPARATOR, ID_SEPARATOR_SAFE))
def getid(self):
uid = u""
try:
filename = self.xmlelement.iterancestors(self.namespaced('file')).next().get('original')
if filename:
uid = filename + ID_SEPARATOR
except StopIteration:
# unit has no proper file ancestor, probably newly created
pass
        # hide the fact that we sanitize ID_SEPARATOR
uid += unicode(self.xmlelement.get("id") or u"").replace(ID_SEPARATOR_SAFE, ID_SEPARATOR)
return uid
def addlocation(self, location):
self.setid(location)
def getlocations(self):
id_attr = unicode(self.xmlelement.get("id") or u"")
# XLIFF files downloaded from PO projects in Pootle
# might have id equal to .source, so let's avoid
# that:
if id_attr and id_attr != self.source:
return [id_attr]
return []
def createcontextgroup(self, name, contexts=None, purpose=None):
"""Add the context group to the trans-unit with contexts a list with
(type, text) tuples describing each context."""
assert contexts
group = etree.Element(self.namespaced("context-group"))
# context-group tags must appear at the start within <group>
# tags. Otherwise it must be appended to the end of a group
# of tags.
if self.xmlelement.tag == self.namespaced("group"):
self.xmlelement.insert(0, group)
else:
self.xmlelement.append(group)
group.set("name", name)
if purpose:
group.set("purpose", purpose)
for type, text in contexts:
if isinstance(text, str):
text = text.decode("utf-8")
context = etree.SubElement(group, self.namespaced("context"))
context.text = text
context.set("context-type", type)
def getcontextgroups(self, name):
"""Returns the contexts in the context groups with the specified name"""
groups = []
grouptags = self.xmlelement.iterdescendants(self.namespaced("context-group"))
        # TODO: combine name in query
for group in grouptags:
if group.get("name") == name:
contexts = group.iterdescendants(self.namespaced("context"))
pairs = []
for context in contexts:
pairs.append((context.get("context-type"), lisa.getText(context, getXMLspace(self.xmlelement, self._default_xml_space))))
groups.append(pairs) # not extend
return groups
def getrestype(self):
"""returns the restype attribute in the trans-unit tag"""
return self.xmlelement.get("restype")
def merge(self, otherunit, overwrite=False, comments=True, authoritative=False):
# TODO: consider other attributes like "approved"
super(xliffunit, self).merge(otherunit, overwrite, comments)
if self.target:
self.marktranslated()
if otherunit.isfuzzy():
self.markfuzzy()
elif otherunit.source == self.source:
self.markfuzzy(False)
elif otherunit.source != self.source:
self.markfuzzy(True)
if comments:
self.addnote(otherunit.getnotes())
def correctorigin(self, node, origin):
"""Check against node tag's origin (e.g note or alt-trans)"""
if origin is None:
return True
elif origin in node.get("from", ""):
return True
elif origin in node.get("origin", ""):
return True
else:
return False
@classmethod
def multistring_to_rich(cls, mstr):
"""Override :meth:`TranslationUnit.multistring_to_rich` which is used
by the ``rich_source`` and ``rich_target`` properties."""
strings = mstr
if isinstance(mstr, multistring):
strings = mstr.strings
elif isinstance(mstr, basestring):
strings = [mstr]
return [xml_to_strelem(s) for s in strings]
@classmethod
def rich_to_multistring(cls, elem_list):
"""Override :meth:`TranslationUnit.rich_to_multistring` which is used
by the ``rich_source`` and ``rich_target`` properties."""
return multistring([unicode(elem) for elem in elem_list])
class xlifffile(lisa.LISAfile):
"""Class representing a XLIFF file store."""
UnitClass = xliffunit
Name = "XLIFF Translation File"
Mimetypes = ["application/x-xliff", "application/x-xliff+xml"]
Extensions = ["xlf", "xliff", "sdlxliff"]
rootNode = "xliff"
bodyNode = "body"
XMLskeleton = '''<?xml version="1.0" ?>
<xliff version='1.1' xmlns='urn:oasis:names:tc:xliff:document:1.1'>
<file original='NoName' source-language='en' datatype='plaintext'>
<body>
</body>
</file>
</xliff>'''
namespace = 'urn:oasis:names:tc:xliff:document:1.1'
unversioned_namespace = 'urn:oasis:names:tc:xliff:document:'
suggestions_in_format = True
"""xliff units have alttrans tags which can be used to store suggestions"""
def __init__(self, *args, **kwargs):
self._filename = None
lisa.LISAfile.__init__(self, *args, **kwargs)
self._messagenum = 0
def initbody(self):
# detect the xliff namespace, handle both 1.1 and 1.2
for prefix, ns in self.document.getroot().nsmap.items():
if ns and ns.startswith(self.unversioned_namespace):
self.namespace = ns
break
else:
# handle crappy xliff docs without proper namespace declaration
# by simply using the xmlns default namespace
self.namespace = self.document.getroot().nsmap.get(None, None)
if self._filename:
filenode = self.getfilenode(self._filename, createifmissing=True)
else:
filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()
self.body = self.getbodynode(filenode, createifmissing=True)
def addheader(self):
"""Initialise the file header."""
pass
def createfilenode(self, filename, sourcelanguage=None,
targetlanguage=None, datatype='plaintext'):
"""creates a filenode with the given filename. All parameters
are needed for XLIFF compliance."""
if sourcelanguage is None:
sourcelanguage = self.sourcelanguage
if targetlanguage is None:
targetlanguage = self.targetlanguage
# find the default NoName file tag and use it instead of creating a new one
for filenode in self.document.getroot().iterchildren(self.namespaced("file")):
if filenode.get("original") == "NoName":
filenode.set("original", filename)
filenode.set("source-language", sourcelanguage)
if targetlanguage:
filenode.set("target-language", targetlanguage)
return filenode
filenode = etree.Element(self.namespaced("file"))
filenode.set("original", filename)
filenode.set("source-language", sourcelanguage)
if targetlanguage:
filenode.set("target-language", targetlanguage)
filenode.set("datatype", datatype)
bodyNode = etree.SubElement(filenode, self.namespaced(self.bodyNode))
return filenode
def getfilename(self, filenode):
"""returns the name of the given file"""
return filenode.get("original")
def setfilename(self, filenode, filename):
"""set the name of the given file"""
return filenode.set("original", filename)
def getfilenames(self):
"""returns all filenames in this XLIFF file"""
filenodes = self.document.getroot().iterchildren(self.namespaced("file"))
filenames = [self.getfilename(filenode) for filenode in filenodes]
filenames = filter(None, filenames)
if len(filenames) == 1 and filenames[0] == '':
filenames = []
return filenames
def getfilenode(self, filename, createifmissing=False):
"""finds the filenode with the given name"""
filenodes = self.document.getroot().iterchildren(self.namespaced("file"))
for filenode in filenodes:
if self.getfilename(filenode) == filename:
return filenode
if createifmissing:
filenode = self.createfilenode(filename)
return filenode
return None
def getids(self, filename=None):
if not filename:
return super(xlifffile, self).getids()
self.id_index = {}
prefix = filename + ID_SEPARATOR
units = (unit for unit in self.units if unit.getid().startswith(prefix))
for index, unit in enumerate(units):
self.id_index[unit.getid()[len(prefix):]] = unit
return self.id_index.keys()
def setsourcelanguage(self, language):
if not language:
return
filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()
filenode.set("source-language", language)
def getsourcelanguage(self):
filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()
return filenode.get("source-language")
sourcelanguage = property(getsourcelanguage, setsourcelanguage)
def settargetlanguage(self, language):
if not language:
return
filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()
filenode.set("target-language", language)
def gettargetlanguage(self):
filenode = self.document.getroot().iterchildren(self.namespaced('file')).next()
return filenode.get("target-language")
targetlanguage = property(gettargetlanguage, settargetlanguage)
def getdatatype(self, filename=None):
"""Returns the datatype of the stored file. If no filename is given,
the datatype of the first file is given."""
if filename:
node = self.getfilenode(filename)
            if node is not None:
return node.get("datatype")
else:
filenames = self.getfilenames()
if len(filenames) > 0 and filenames[0] != "NoName":
return self.getdatatype(filenames[0])
return ""
def getdate(self, filename=None):
"""Returns the date attribute for the file.
        If no filename is given, the date of the first file is returned.
If the date attribute is not specified, None is returned.
:returns: Date attribute of file
:rtype: Date or None
"""
if filename:
node = self.getfilenode(filename)
            if node is not None:
return node.get("date")
else:
filenames = self.getfilenames()
if len(filenames) > 0 and filenames[0] != "NoName":
return self.getdate(filenames[0])
return None
def removedefaultfile(self):
"""We want to remove the default file-tag as soon as possible if we
know if still present and empty."""
filenodes = list(self.document.getroot().iterchildren(self.namespaced("file")))
if len(filenodes) > 1:
for filenode in filenodes:
if (filenode.get("original") == "NoName" and
not list(filenode.iterdescendants(self.namespaced(self.UnitClass.rootNode)))):
self.document.getroot().remove(filenode)
break
def getheadernode(self, filenode, createifmissing=False):
"""finds the header node for the given filenode"""
# TODO: Deprecated?
headernode = filenode.iterchildren(self.namespaced("header"))
try:
return headernode.next()
except StopIteration:
pass
if not createifmissing:
return None
headernode = etree.SubElement(filenode, self.namespaced("header"))
return headernode
def getbodynode(self, filenode, createifmissing=False):
"""finds the body node for the given filenode"""
bodynode = filenode.iterchildren(self.namespaced("body"))
try:
return bodynode.next()
except StopIteration:
pass
if not createifmissing:
return None
bodynode = etree.SubElement(filenode, self.namespaced("body"))
return bodynode
def addsourceunit(self, source, filename="NoName", createifmissing=False):
"""adds the given trans-unit to the last used body node if the
filename has changed it uses the slow method instead (will
create the nodes required if asked). Returns success"""
if self._filename != filename:
if not self.switchfile(filename, createifmissing):
return None
unit = super(xlifffile, self).addsourceunit(source)
self._messagenum += 1
unit.setid("%d" % self._messagenum)
return unit
def switchfile(self, filename, createifmissing=False):
"""Adds the given trans-unit (will create the nodes required if asked).
:returns: Success
:rtype: Boolean
"""
self._filename = filename
filenode = self.getfilenode(filename)
if filenode is None:
if not createifmissing:
return False
filenode = self.createfilenode(filename)
self.document.getroot().append(filenode)
self.body = self.getbodynode(filenode, createifmissing=createifmissing)
if self.body is None:
return False
self._messagenum = len(list(self.body.iterdescendants(self.namespaced("trans-unit"))))
# TODO: was 0 based before - consider
# messagenum = len(self.units)
# TODO: we want to number them consecutively inside a body/file tag
# instead of globally in the whole XLIFF file, but using
# len(self.units) will be much faster
return True
def creategroup(self, filename="NoName", createifmissing=False, restype=None):
"""adds a group tag into the specified file"""
if self._filename != filename:
if not self.switchfile(filename, createifmissing):
return None
group = etree.SubElement(self.body, self.namespaced("group"))
if restype:
group.set("restype", restype)
return group
def __str__(self):
self.removedefaultfile()
return super(xlifffile, self).__str__()
@classmethod
def parsestring(cls, storestring):
"""Parses the string to return the correct file object"""
xliff = super(xlifffile, cls).parsestring(storestring)
if xliff.units:
header = xliff.units[0]
if (("gettext-domain-header" in (header.getrestype() or "") or
xliff.getdatatype() == "po") and
cls.__name__.lower() != "poxlifffile"):
from translate.storage import poxliff
xliff = poxliff.PoXliffFile.parsestring(storestring)
return xliff
| mpl-2.0 |
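A minimal usage sketch for the xlifffile store defined above (illustrative
only; it assumes the Python 2-era translate toolkit API shown in the source):

# sketch: create a store, add a unit to the default "NoName" file, serialise
from translate.storage import xliff

store = xliff.xlifffile()
store.settargetlanguage("fr")
unit = store.addsourceunit("Hello")  # lands in the default "NoName" file node
unit.target = "Bonjour"
output = str(store)  # __str__ calls removedefaultfile() before serialising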
amanuel/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/tarbz2.py | 61 | 1803 | """SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/tarbz2.py 5134 2010/08/16 23:02:40 bdeegan"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
    bld.set_suffix('.tar.bz2')  # bzip2-compressed (TARFLAGS='-jc' below)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
target, source = stripinstallbuilder(target, source, env)
return bld(env, target, source, TARFLAGS='-jc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
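For context, a minimal SConstruct fragment that would exercise this packager
might look like the following (an illustrative sketch; the project name and
source list are hypothetical):

# SConstruct (sketch)
env = Environment(tools=['default', 'packaging'])
env.Package(NAME='hello', VERSION='1.0',
            PACKAGETYPE='src_tarbz2',  # selects the tarbz2 module above
            source=['hello.c', 'SConstruct'])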
NeCTAR-RC/nova | nova/tests/unit/virt/libvirt/test_imagecache.py | 7 | 42584 | # Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import hashlib
import os
import time
import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import formatters
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from six.moves import cStringIO
from nova import conductor
from nova import context
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
@contextlib.contextmanager
def intercept_log_messages():
try:
mylog = logging.getLogger('nova')
stream = cStringIO()
handler = logging.logging.StreamHandler(stream)
handler.setFormatter(formatters.ContextFormatter())
mylog.logger.addHandler(handler)
yield stream
finally:
mylog.logger.removeHandler(handler)
class ImageCacheManagerTestCase(test.NoDBTestCase):
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self.stock_instance_names = set(['instance-00000001',
'instance-00000002',
'instance-00000003',
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stub_out('os.path.exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertIsNone(csum)
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(time, 'time', return_value=2000000)
@mock.patch.object(os.path, 'getmtime', return_value=1000000)
def test_get_age_of_file(self, mock_getmtime, mock_time, mock_exists):
image_cache_manager = imagecache.ImageCacheManager()
exists, age = image_cache_manager._get_age_of_file('/tmp')
self.assertTrue(exists)
self.assertEqual(1000000, age)
@mock.patch.object(os.path, 'exists', return_value=False)
def test_get_age_of_file_not_exists(self, mock_exists):
image_cache_manager = imagecache.ImageCacheManager()
exists, age = image_cache_manager._get_age_of_file('/tmp')
self.assertFalse(exists)
self.assertEqual(0, age)
def test_read_stored_checksum(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
def test_read_stored_checksum_legacy_essex(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
old_fname = fname + '.sha1'
f = open(old_fname, 'w')
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
info_fname = imagecache.get_info_filename(fname)
self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
'ephemeral_0_20_None',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
'00000004',
'swap_1000']
images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
'e97222e91fc4241f49a7f520d1dcf446751129b3',
'17d1b00b81642842e514494a78e804e9a511637c',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120',
'17d1b00b81642842e514494a78e804e9a511637c_10737418240']
listing.extend(images)
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
self.flags(instances_path='/var/lib/nova/instances')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
sanitized = []
for ent in image_cache_manager.unexplained_images:
sanitized.append(ent.replace(base_dir + '/', ''))
self.assertEqual(sorted(sanitized), sorted(images))
expected = os.path.join(base_dir,
'e97222e91fc4241f49a7f520d1dcf446751129b3')
self.assertIn(expected, image_cache_manager.unexplained_images)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertIn(expected, image_cache_manager.unexplained_images)
unexpected = os.path.join(base_dir, '00000004')
self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
for ent in image_cache_manager.unexplained_images:
self.assertTrue(ent.startswith(base_dir))
self.assertEqual(len(image_cache_manager.originals), 2)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c')
self.assertIn(expected, image_cache_manager.originals)
unexpected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertNotIn(unexpected, image_cache_manager.originals)
self.assertEqual(1, len(image_cache_manager.back_swap_images))
self.assertIn('swap_1000', image_cache_manager.back_swap_images)
def test_list_backing_images_small(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_resized(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_instancename(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_disk_notexist(self):
self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
def fake_get_disk(disk_path):
raise processutils.ProcessExecutionError()
self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = []
image_cache_manager.instance_names = self.stock_instance_names
self.assertRaises(processutils.ProcessExecutionError,
image_cache_manager._list_backing_images)
def test_find_base_file_nothing(self):
self.stub_out('os.path.exists', lambda x: False)
base_dir = '/var/lib/nova/instances/_base'
fingerprint = '549867354867'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
self.assertEqual(0, len(res))
def test_find_base_file_small(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
self.stub_out('os.path.exists',
lambda x: x.endswith('%s_sm' % fingerprint))
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_sm')
self.assertEqual(res, [(base_file, True, False)])
def test_find_base_file_resized(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.exists',
lambda x: x.endswith('%s_10737418240' % fingerprint))
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file, False, True)])
def test_find_base_file_all(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stub_out('os.listdir', lambda x: listing)
self.stub_out('os.path.exists', lambda x: True)
self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file1 = os.path.join(base_dir, fingerprint)
base_file2 = os.path.join(base_dir, fingerprint + '_sm')
base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file1, False, False),
(base_file2, True, False),
(base_file3, False, True)])
@contextlib.contextmanager
def _make_base_file(self, checksum=True, lock=True):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
base_file = open(fname, 'w')
base_file.write('data')
base_file.close()
if lock:
lockdir = os.path.join(tmpdir, 'locks')
lockname = os.path.join(lockdir, 'nova-aaa')
os.mkdir(lockdir)
lock_file = open(lockname, 'w')
lock_file.write('data')
lock_file.close()
base_file = open(fname, 'r')
if checksum:
imagecache.write_stored_checksum(fname)
base_file.close()
yield fname
def test_remove_base_file(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
lock_name = 'nova-' + os.path.split(fname)[-1]
lock_dir = os.path.join(CONF.instances_path, 'locks')
lock_file = os.path.join(lock_dir, lock_name)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
self.assertTrue(os.path.exists(lock_file))
# Old files get cleaned up though
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
self.assertFalse(os.path.exists(lock_file))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# This file should stay longer than a resized image
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Originals don't stay forever though
os.utime(fname, (-1, time.time() - 3600 * 25))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_dne(self):
# This test is solely to execute the "does not exist" code path. We
# don't expect the method being tested to do anything in this case.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
def test_remove_base_file_oserror(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
os.mkdir(fname)
os.utime(fname, (-1, time.time() - 3601))
# This will raise an OSError because of file permissions
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertNotEqual(stream.getvalue().find('Failed to remove'),
-1)
def test_handle_base_image_unused(self):
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files,
[fname])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_used_remotely(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_absent(self):
img = '123'
with intercept_log_messages() as stream:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, None)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
self.assertNotEqual(stream.getvalue().find('an absent base file'),
-1)
def test_handle_base_image_used_missing(self):
img = '123'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@mock.patch.object(libvirt_utils, 'update_mtime')
def test_handle_base_image_checksum_fails(self, mock_mtime):
self.flags(checksum_base_images=True, group='libvirt')
img = '123'
with self._make_base_file() as fname:
with open(fname, 'w') as f:
f.write('banana')
d = {'sha1': '21323454'}
with open('%s.info' % fname, 'w') as f:
f.write(jsonutils.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files,
[fname])
@mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch.object(lockutils, 'external_lock')
def test_verify_base_images(self, mock_lock, mock_mtime):
hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
self.flags(instances_path='/instance_path',
image_cache_subdirectory_name='_base')
base_file_list = ['00000001',
'ephemeral_0_20_None',
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
hashed_42,
hashed_1,
hashed_21,
hashed_22,
'%s_5368709120' % hashed_1,
'%s_10737418240' % hashed_1,
'00000004']
def fq_path(path):
return os.path.join('/instance_path/_base/', path)
# Fake base directory existence
orig_exists = os.path.exists
def exists(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_exists(path)
if path in ['/instance_path',
'/instance_path/_base',
'/instance_path/instance-1/disk',
'/instance_path/instance-2/disk',
'/instance_path/instance-3/disk',
'/instance_path/_base/%s.info' % hashed_42]:
return True
for p in base_file_list:
if path == fq_path(p):
return True
if path == fq_path(p) + '.info':
return False
if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
hashed_21,
hashed_22,
hashed_42]]:
return False
self.fail('Unexpected path existence check: %s' % path)
self.stub_out('os.path.exists', lambda x: exists(x))
# Fake up some instances in the instances directory
orig_listdir = os.listdir
def listdir(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_listdir(path)
if path == '/instance_path':
return ['instance-1', 'instance-2', 'instance-3', '_base']
if path == '/instance_path/_base':
return base_file_list
self.fail('Unexpected directory listed: %s' % path)
self.stub_out('os.listdir', lambda x: listdir(x))
# Fake isfile for these faked images in _base
orig_isfile = os.path.isfile
def isfile(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_isfile(path)
for p in base_file_list:
if path == fq_path(p):
return True
self.fail('Unexpected isfile call: %s' % path)
self.stub_out('os.path.isfile', lambda x: isfile(x))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'kernel_id': '21',
'ramdisk_id': '22',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = [fake_instance.fake_instance_obj(None, **instance)
for instance in instances]
image_cache_manager = imagecache.ImageCacheManager()
# Fake the utils call which finds the backing image
def get_disk_backing_file(path):
if path in ['/instance_path/instance-1/disk',
'/instance_path/instance-2/disk']:
return fq_path('%s_5368709120' % hashed_1)
self.fail('Unexpected backing file lookup: %s' % path)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: get_disk_backing_file(x))
# Fake out verifying checksums, as that is tested elsewhere
self.stubs.Set(image_cache_manager, '_verify_checksum',
lambda x, y: True)
# Fake getmtime as well
orig_getmtime = os.path.getmtime
def getmtime(path):
if not path.startswith('/instance_path'):
return orig_getmtime(path)
return 1000000
self.stub_out('os.path.getmtime', lambda x: getmtime(x))
# Make sure we don't accidentally remove a real file
orig_remove = os.remove
def remove(path):
if not path.startswith('/instance_path'):
return orig_remove(path)
# Don't try to remove fake files
return
self.stub_out('os.remove', lambda x: remove(x))
self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
ctxt = context.get_admin_context()
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '123').AndReturn(None)
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '456').AndReturn(None)
self.mox.ReplayAll()
# And finally we can make the call we're actually testing...
# The argument here should be a context, but it is mocked out
image_cache_manager.update(ctxt, all_instances)
# Verify
active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
fq_path(hashed_21), fq_path(hashed_22)]
for act in active:
self.assertIn(act, image_cache_manager.active_base_files)
self.assertEqual(len(image_cache_manager.active_base_files),
len(active))
for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
fq_path(hashed_42),
fq_path('%s_10737418240' % hashed_1)]:
self.assertIn(rem, image_cache_manager.removable_base_files)
# Ensure there are no "corrupt" images as well
self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
def test_verify_base_images_no_base(self):
self.flags(instances_path='/tmp/no/such/dir/name/please')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(None, [])
def test_is_valid_info_file(self):
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
self.flags(instances_path='/tmp/no/such/dir/name/please')
self.flags(image_info_filename_pattern=('$instances_path/_base/'
'%(image)s.info'),
group='libvirt')
base_filename = os.path.join(CONF.instances_path, '_base', hashed)
is_valid_info_file = imagecache.is_valid_info_file
self.assertFalse(is_valid_info_file('banana'))
self.assertFalse(is_valid_info_file(
os.path.join(CONF.instances_path, '_base', '00000001')))
self.assertFalse(is_valid_info_file(base_filename))
self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
self.assertTrue(is_valid_info_file(base_filename + '.info'))
def test_configured_checksum_path(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
# Ensure there is a base directory
os.mkdir(os.path.join(tmpdir, '_base'))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = []
for instance in instances:
all_instances.append(fake_instance.fake_instance_obj(
None, **instance))
def touch(filename):
f = open(filename, 'w')
f.write('Touched')
f.close()
old = time.time() - (25 * 3600)
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
base_filename = os.path.join(tmpdir, hashed)
touch(base_filename)
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
self.mox.StubOutWithMock(
objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
ctxt = context.get_admin_context()
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '123').AndReturn(None)
objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, '456').AndReturn(None)
self.mox.ReplayAll()
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(ctxt,
all_instances)
self.assertTrue(os.path.exists(base_filename))
self.assertTrue(os.path.exists(base_filename + '.info'))
def test_run_image_cache_manager_pass(self):
was = {'called': False}
def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
instances = []
for x in range(2):
instances.append(fake_instance.fake_db_instance(
image_ref='1',
uuid=x,
name=x,
vm_state='',
task_state=''))
return instances
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.stub_out('nova.db.instance_get_all_by_filters',
fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()
ctxt = context.get_admin_context()
compute._run_image_cache_manager_pass(ctxt)
self.assertTrue(was['called'])
def test_store_swap_image(self):
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._store_swap_image('swap_')
image_cache_manager._store_swap_image('swap_123')
image_cache_manager._store_swap_image('swap_456')
image_cache_manager._store_swap_image('swap_abc')
image_cache_manager._store_swap_image('123_swap')
image_cache_manager._store_swap_image('swap_129_')
self.assertEqual(len(image_cache_manager.back_swap_images), 2)
expect_set = set(['swap_123', 'swap_456'])
self.assertEqual(image_cache_manager.back_swap_images, expect_set)
@mock.patch.object(lockutils, 'external_lock')
@mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('os.path.getmtime')
@mock.patch('os.remove')
def test_age_and_verify_swap_images(self, mock_remove, mock_getmtime,
mock_exist, mock_mtime, mock_lock):
image_cache_manager = imagecache.ImageCacheManager()
expected_remove = set()
expected_exist = set(['swap_128', 'swap_256'])
image_cache_manager.back_swap_images.add('swap_128')
image_cache_manager.back_swap_images.add('swap_256')
image_cache_manager.used_swap_images.add('swap_128')
def getmtime(path):
return time.time() - 1000000
mock_getmtime.side_effect = getmtime
def removefile(path):
if not path.startswith('/tmp_age_test'):
return os.remove(path)
fn = os.path.split(path)[-1]
expected_remove.add(fn)
expected_exist.remove(fn)
mock_remove.side_effect = removefile
image_cache_manager._age_and_verify_swap_images(None, '/tmp_age_test')
self.assertEqual(1, len(expected_exist))
self.assertEqual(1, len(expected_remove))
self.assertIn('swap_128', expected_exist)
self.assertIn('swap_256', expected_remove)
@mock.patch.object(utils, 'synchronized')
@mock.patch.object(imagecache.ImageCacheManager, '_get_age_of_file',
return_value=(True, 100))
def test_lock_acquired_on_removing_old_enough_files(self, mock_get_age,
mock_synchronized):
base_file = '/tmp_age_test'
lock_path = os.path.join(CONF.instances_path, 'locks')
lock_file = os.path.split(base_file)[-1]
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_old_enough_file(
base_file, 60, remove_sig=False, remove_lock=False)
mock_synchronized.assert_called_once_with(lock_file, external=True,
lock_path=lock_path)
class VerifyChecksumTestCase(test.NoDBTestCase):
def setUp(self):
super(VerifyChecksumTestCase, self).setUp()
self.img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True, group='libvirt')
def _make_checksum(self, tmpdir):
testdata = ('OpenStack Software delivers a massively scalable cloud '
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
with open(fname, 'w') as f:
f.write(testdata)
return fname, info_fname, testdata
def _write_file(self, info_fname, info_attr, testdata):
f = open(info_fname, 'w')
if info_attr == "csum valid":
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
elif info_attr == "csum invalid, not json":
f.write('banana')
else:
f.write('{"sha1": "banana"}')
f.close()
def _check_body(self, tmpdir, info_attr):
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
self._write_file(info_fname, info_attr, testdata)
image_cache_manager = imagecache.ImageCacheManager()
return image_cache_manager, fname
def test_verify_checksum(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertTrue(res)
def test_verify_checksum_disabled(self):
self.flags(checksum_base_images=False, group='libvirt')
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertIsNone(res)
def test_verify_checksum_invalid_json(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=False)
self.assertFalse(res)
log = stream.getvalue()
# NOTE(mikal): this is a skip not a fail because the file is
# present, but is not in valid JSON format and therefore is
# skipped.
self.assertNotEqual(log.find('image verification skipped'), -1)
def test_verify_checksum_invalid_repaired(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=True)
self.assertIsNone(res)
def test_verify_checksum_invalid(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, valid json"))
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertFalse(res)
log = stream.getvalue()
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum('aaa', fname)
self.assertIsNone(res)
# Checksum requests for a file with no checksum now have the
# side effect of creating the checksum
self.assertTrue(os.path.exists(info_fname))
| apache-2.0 |
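The tests above lean heavily on stubbing os-level calls so that the image
cache scan sees a fake filesystem. A condensed sketch of that pattern with
plain mock (names below are hypothetical and not part of the file above):

import os
import mock

listing = ['_base', 'instance-00000001']
with mock.patch('os.listdir', return_value=listing), \
     mock.patch('os.path.exists', return_value=True):
    assert os.listdir('/instance_path') == listing  # the fake listing wins
    assert os.path.exists('/instance_path/_base')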
2014c2g23/2015cda | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_assertions.py | 738 | 15398 | import datetime
import warnings
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
        self.assertMessages('assertSequenceEqual', ([], [None]),
                            [r"\+ \[None\]$", "^oops$", r"\+ \[None\]$",
                             r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
        self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
                            [r"\+ \{'key': 'value'\}$", "^oops$",
                             r"\+ \{'key': 'value'\}$",
                             r"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
| gpl-3.0 |
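An illustrative sketch of the longMessage behaviour that the suite above
verifies: with longMessage = True the standard failure message and the
user-supplied msg are combined.

import unittest

class Demo(unittest.TestCase):
    longMessage = True  # the default
    def test_msg(self):
        self.assertTrue(False, msg="oops")  # fails: "False is not true : oops"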
jamesblunt/sympy | sympy/printing/tests/test_mathematica.py | 93 | 2612 | from sympy.core import (S, pi, oo, symbols, Function,
Rational, Integer, Tuple)
from sympy.integrals import Integral
from sympy.concrete import Sum
from sympy.functions import exp, sin, cos
from sympy import mathematica_code as mcode
x, y, z = symbols('x,y,z')
f = Function('f')
def test_Integer():
assert mcode(Integer(67)) == "67"
assert mcode(Integer(-1)) == "-1"
def test_Rational():
assert mcode(Rational(3, 7)) == "3/7"
assert mcode(Rational(18, 9)) == "2"
assert mcode(Rational(3, -7)) == "-3/7"
assert mcode(Rational(-3, -7)) == "3/7"
assert mcode(x + Rational(3, 7)) == "x + 3/7"
assert mcode(Rational(3, 7)*x) == "(3/7)*x"
def test_Function():
assert mcode(f(x, y, z)) == "f[x, y, z]"
assert mcode(sin(x) ** cos(x)) == "Sin[x]^Cos[x]"
def test_Pow():
assert mcode(x**3) == "x^3"
assert mcode(x**(y**3)) == "x^(y^3)"
assert mcode(1/(f(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*f[x])^(-x + y^x)/(x^2 + y)"
assert mcode(x**-1.0) == 'x^(-1.0)'
assert mcode(x**Rational(2, 3)) == 'x^(2/3)'
def test_Mul():
A, B, C, D = symbols('A B C D', commutative=False)
assert mcode(x*y*z) == "x*y*z"
assert mcode(x*y*A) == "x*y*A"
assert mcode(x*y*A*B) == "x*y*A**B"
assert mcode(x*y*A*B*C) == "x*y*A**B**C"
assert mcode(x*A*B*(C + D)*A*y) == "x*y*A**B**(C + D)**A"
def test_constants():
assert mcode(pi) == "Pi"
assert mcode(oo) == "Infinity"
assert mcode(S.NegativeInfinity) == "-Infinity"
assert mcode(S.EulerGamma) == "EulerGamma"
assert mcode(S.Catalan) == "Catalan"
assert mcode(S.Exp1) == "E"
def test_containers():
assert mcode([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \
"{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}"
assert mcode((1, 2, (3, 4))) == "{1, 2, {3, 4}}"
assert mcode([1]) == "{1}"
assert mcode((1,)) == "{1}"
assert mcode(Tuple(*[1, 2, 3])) == "{1, 2, 3}"
def test_Integral():
assert mcode(Integral(sin(sin(x)), x)) == "Hold[Integrate[Sin[Sin[x]], x]]"
assert mcode(Integral(exp(-x**2 - y**2),
(x, -oo, oo),
(y, -oo, oo))) == \
"Hold[Integrate[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, " \
"{y, -Infinity, Infinity}]]"
def test_Sum():
assert mcode(Sum(sin(x), (x, 0, 10))) == "Hold[Sum[Sin[x], {x, 0, 10}]]"
assert mcode(Sum(exp(-x**2 - y**2),
(x, -oo, oo),
(y, -oo, oo))) == \
"Hold[Sum[Exp[-x^2 - y^2], {x, -Infinity, Infinity}, " \
"{y, -Infinity, Infinity}]]"
| bsd-3-clause |
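For reference, the printer exercised above is used like this (illustrative):

from sympy import symbols, sin, Rational, mathematica_code

x = symbols('x')
print(mathematica_code(sin(x)**2))         # Sin[x]^2
print(mathematica_code(Rational(3, 7)*x))  # (3/7)*x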
40223151/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/case.py | 743 | 48873 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
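# Illustrative usage of the skipping decorators above:
#
#     class MyTests(unittest.TestCase):
#         @skipUnless(sys.platform.startswith("win"), "requires Windows")
#         def test_windows_support(self):
#             ...  # only runs on Windows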
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
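# Illustrative usage: a test for a known bug is reported as an expected
# failure instead of failing the run, and _UnexpectedSuccess fires if the
# bug is ever fixed:
#
#     class BugTests(unittest.TestCase):
#         @expectedFailure
#         def test_known_bug(self):
#             self.assertEqual(1, 2)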
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
        # Each module's __warningregistry__ needs to be in a pristine state
        # for tests to work properly.

for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
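    # Usage sketch (hypothetical Point type and helper): registering a
    # comparer makes assertEqual produce a domain-specific failure message:
    #
    #     class PointTest(TestCase):
    #         def setUp(self):
    #             self.addTypeEqualityFunc(Point, self.assertPointEqual)
    #         def assertPointEqual(self, a, b, msg=None):
    #             if (a.x, a.y) != (b.x, b.y):
    #                 raise self.failureException(msg or '%r != %r' % (a, b))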
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
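    # Usage sketch (hypothetical fixture): cleanups run LIFO and fire even if
    # setUp fails partway, which makes them safer than tearDown for releasing
    # resources acquired during setUp:
    #
    #     def setUp(self):
    #         self.tmpdir = tempfile.mkdtemp()
    #         self.addCleanup(shutil.rmtree, self.tmpdir)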
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
        * If an explicit message is provided, append ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is more than the given delta.
           Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
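    # Illustration of the two modes above (hypothetical values): places rounds
    # the difference to a number of decimal places, delta bounds its absolute
    # size, and passing both raises TypeError:
    #
    #     self.assertAlmostEqual(1.00000001, 1.0)          # default places=7
    #     self.assertAlmostEqual(100.0, 101.5, delta=2.0)  # |diff| <= delta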
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
           (default 7) and comparing to zero, or by comparing that the
           difference between the two objects is less than the given delta.
           Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
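    # Concrete illustration of the counting semantics documented above:
    #
    #     self.assertCountEqual([0, 1, 1], [1, 0, 1])  # passes: equal counts
    #     self.assertCountEqual([0, 0, 1], [0, 1])     # fails: 0 occurs 2 vs 1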
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
                to be found in the error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
                to be found in the error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
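    # Usage sketch for the regex assertions above (hypothetical strings):
    #
    #     self.assertRegex("unittest case", r"^unit")     # passes
    #     self.assertNotRegex("unittest case", r"\d{4}")  # passes: no digits
    #     self.assertRegex("abc", "")  # AssertionError: empty pattern rejected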
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
| gpl-3.0 |
jbassen/edx-platform | common/test/acceptance/tests/lms/test_lms_edxnotes.py | 84 | 44359 | """
Test LMS Notes
"""
from uuid import uuid4
from datetime import datetime
from nose.plugins.attrib import attr
from ..helpers import UniqueCourseTest
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.edxnotes import EdxNotesUnitPage, EdxNotesPage, EdxNotesPageNoContent
from ...fixtures.edxnotes import EdxNotesFixture, Note, Range
from ..helpers import EventsTestMixin
class EdxNotesTestMixin(UniqueCourseTest):
"""
Creates a course with initial data and contains useful helper methods.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EdxNotesTestMixin, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.note_unit_page = EdxNotesUnitPage(self.browser, self.course_id)
self.notes_page = EdxNotesPage(self.browser, self.course_id)
self.username = str(uuid4().hex)[:5]
self.email = "{}@email.com".format(self.username)
self.selector = "annotate-id"
self.edxnotes_fixture = EdxNotesFixture()
self.course_fixture = CourseFixture(
self.course_info["org"], self.course_info["number"],
self.course_info["run"], self.course_info["display_name"]
)
self.course_fixture.add_advanced_settings({
u"edxnotes": {u"value": True}
})
self.course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section 1").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 1").add_children(
XBlockFixtureDesc("vertical", "Test Unit 1").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 1",
data="""
<p><span class="{}">Annotate this text!</span></p>
<p>Annotate this text</p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 2",
data="""<p><span class="{}">Annotate this text!</span></p>""".format(self.selector)
),
),
XBlockFixtureDesc("vertical", "Test Unit 2").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 3",
data="""<p><span class="{}">Annotate this text!</span></p>""".format(self.selector)
),
),
),
XBlockFixtureDesc("sequential", "Test Subsection 2").add_children(
XBlockFixtureDesc("vertical", "Test Unit 3").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 4",
data="""
<p><span class="{}">Annotate this text!</span></p>
""".format(self.selector)
),
),
),
),
XBlockFixtureDesc("chapter", "Test Section 2").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 3").add_children(
XBlockFixtureDesc("vertical", "Test Unit 4").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 5",
data="""
<p><span class="{}">Annotate this text!</span></p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 6",
data="""<p><span class="{}">Annotate this text!</span></p>""".format(self.selector)
),
),
),
)).install()
self.addCleanup(self.edxnotes_fixture.cleanup)
AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()
def _add_notes(self):
xblocks = self.course_fixture.get_nested_xblocks(category="html")
notes_list = []
for index, xblock in enumerate(xblocks):
notes_list.append(
Note(
user=self.username,
usage_id=xblock.locator,
course_id=self.course_fixture._course_key,
ranges=[Range(startOffset=index, endOffset=index + 5)]
)
)
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
@attr('shard_4')
class EdxNotesDefaultInteractionsTest(EdxNotesTestMixin):
"""
Tests for creation, editing, deleting annotations inside annotatable components in LMS.
"""
def create_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
for note in component.create_note(".{}".format(self.selector)):
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_tags_in_notes(self, components, tags):
self.assertGreater(len(components), 0)
index = 0
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.tags = tags[index]
index += 1
self.assertEqual(index, len(tags), "Number of supplied tags did not match components")
def remove_notes(self, components):
self.assertGreater(len(components), 0)
for component in components:
self.assertGreater(len(component.notes), 0)
component.remove_note()
def assert_notes_are_removed(self, components):
for component in components:
self.assertEqual(0, len(component.notes))
def assert_text_in_notes(self, notes):
actual = [note.text for note in notes]
expected = ["TEST TEXT {}".format(i) for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def assert_tags_in_notes(self, notes, expected_tags):
actual = [note.tags for note in notes]
expected = [expected_tags[i] for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def test_can_create_notes(self):
"""
Scenario: User can create notes.
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add 2 notes for the first component and 1 note for the second
Then I see that notes were correctly created
When I change sequential position to "2"
And I add note for the annotatable component on the page
Then I see that note was correctly created
When I refresh the page
Then I see that note was correctly stored
When I change sequential position to "1"
Then I see that notes were correctly stored on the page
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
self.create_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.course_nav.go_to_sequential_position(2)
components = self.note_unit_page.components
self.create_notes(components)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.course_nav.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_edit_notes(self):
"""
Scenario: User can edit notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I change text in the notes
Then I see that notes were correctly changed
When I change sequential position to "2"
And I change the note on the page
Then I see that note was correctly changed
When I refresh the page
Then I see that edited note was correctly stored
When I change sequential position to "1"
Then I see that edited notes were correctly stored on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.course_nav.go_to_sequential_position(2)
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.course_nav.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_delete_notes(self):
"""
Scenario: User can delete notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I remove all notes on the page
Then I do not see any notes on the page
When I change sequential position to "2"
And I remove all notes on the page
Then I do not see any notes on the page
When I refresh the page
Then I do not see any notes on the page
When I change sequential position to "1"
Then I do not see any notes on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
self.course_nav.go_to_sequential_position(2)
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
components = self.note_unit_page.refresh()
self.assert_notes_are_removed(components)
self.course_nav.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_notes_are_removed(components)
def test_can_create_note_with_tags(self):
"""
Scenario: a user of notes can define one with tags
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add a note with tags for the first component
And I refresh the page
Then I see that note was correctly stored with its tags
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
for note in components[0].create_note(".{}".format(self.selector)):
note.tags = ["fruit", "tasty"]
self.note_unit_page.refresh()
self.assertEqual(["fruit", "tasty"], self.note_unit_page.notes[0].tags)
def test_can_change_tags(self):
"""
Scenario: a user of notes can edit tags on notes
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I edit tags on the notes for the 2 annotatable components
Then I see that the tags were correctly changed
And I again edit tags on the notes for the 2 annotatable components
And I refresh the page
Then I see that the tags were correctly changed
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_tags_in_notes(components, [["hard"], ["apple", "pear"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [["hard"], ["apple", "pear"]])
self.edit_tags_in_notes(components, [[], ["avocado"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
self.note_unit_page.refresh()
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
def test_sr_labels(self):
"""
Scenario: screen reader labels exist for text and tags fields
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I open the editor for each note
Then the text and tags fields both have screen reader labels
"""
self._add_notes()
self.note_unit_page.visit()
# First note is in the first annotatable component, will have field indexes 0 and 1.
for note in self.note_unit_page.components[0].edit_note():
self.assertTrue(note.has_sr_label(0, 0, "Note"))
self.assertTrue(note.has_sr_label(1, 1, "Tags (space-separated)"))
# Second note is in the second annotatable component, will have field indexes 2 and 3.
for note in self.note_unit_page.components[1].edit_note():
self.assertTrue(note.has_sr_label(0, 2, "Note"))
self.assertTrue(note.has_sr_label(1, 3, "Tags (space-separated)"))
@attr('shard_4')
class EdxNotesPageTest(EventsTestMixin, EdxNotesTestMixin):
"""
Tests for Notes page.
"""
def _add_notes(self, notes_list):
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
def _add_default_notes(self, tags=None):
"""
Creates 5 test notes. If tags are not specified, will populate the notes with some test tag data.
If tags are specified, they will be used for each of the 3 notes that have tags.
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
# pylint: disable=attribute-defined-outside-init
self.raw_note_list = [
Note(
usage_id=xblocks[4].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this text",
updated=datetime(2011, 1, 1, 1, 1, 1, 1).isoformat(),
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="",
quote=u"Annotate this text",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["Review", "cool"] if tags is None else tags
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this text",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=18)],
tags=["Cool", "TODO"] if tags is None else tags
),
Note(
usage_id=xblocks[3].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fourth note",
quote="",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["review"] if tags is None else tags
),
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fifth note",
quote="Annotate this text",
updated=datetime(2015, 1, 1, 1, 1, 1, 1).isoformat()
),
]
self._add_notes(self.raw_note_list)
def assertNoteContent(self, item, text=None, quote=None, unit_name=None, time_updated=None, tags=None):
""" Verifies the expected properties of the note. """
self.assertEqual(text, item.text)
if item.quote is not None:
self.assertIn(quote, item.quote)
else:
self.assertIsNone(quote)
self.assertEqual(unit_name, item.unit_name)
self.assertEqual(time_updated, item.time_updated)
self.assertEqual(tags, item.tags)
def assertChapterContent(self, item, title=None, subtitles=None):
"""
Verifies the expected title and subsection titles (subtitles) for the given chapter.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.subtitles, subtitles)
def assertGroupContent(self, item, title=None, notes=None):
"""
Verifies the expected title and child notes for the given group.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.notes, notes)
def assert_viewed_event(self, view=None):
"""
Verifies that the correct view event was captured for the Notes page.
"""
# There will always be an initial event for "Recent Activity" because that is the default view.
# If view is something besides "Recent Activity", expect 2 events, with the second one being
# the view name passed in.
if view == 'Recent Activity':
view = None
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.notes_page_viewed'},
number_of_matches=1 if view is None else 2
)
expected_events = [{'event': {'view': 'Recent Activity'}}]
if view:
expected_events.append({'event': {'view': view}})
self.assert_events_match(expected_events, actual_events)
def assert_unit_link_event(self, usage_id, view):
"""
Verifies that the correct used_unit_link event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.used_unit_link'},
number_of_matches=1
)
expected_events = [
{'event': {'component_usage_id': usage_id, 'view': view}}
]
self.assert_events_match(expected_events, actual_events)
def assert_search_event(self, search_string, number_of_results):
"""
Verifies that the correct searched event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.searched'},
number_of_matches=1
)
expected_events = [
{'event': {'search_string': search_string, 'number_of_results': number_of_results}}
]
self.assert_events_match(expected_events, actual_events)
def test_no_content(self):
"""
Scenario: User can see `No content` message.
Given I have a course without notes
When I open Notes page
Then I see only "You do not have any notes within the course." message
"""
notes_page_empty = EdxNotesPageNoContent(self.browser, self.course_id)
notes_page_empty.visit()
self.assertIn(
"You have not made any notes in this course yet. Other students in this course are using notes to:",
notes_page_empty.no_content_text)
def test_recent_activity_view(self):
"""
Scenario: User can view all notes by recent activity.
Given I have a course with 5 notes
When I open Notes page
Then I see 5 notes sorted by the updated date
And I see correct content in the notes
And an event has fired indicating that the Recent Activity view was selected
"""
self._add_default_notes()
self.notes_page.visit()
notes = self.notes_page.notes
self.assertEqual(len(notes), 5)
self.assertNoteContent(
notes[0],
quote=u"Annotate this text",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this text",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this text",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this text",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event()
def test_course_structure_view(self):
"""
Scenario: User can view all notes by location in Course.
Given I have a course with 5 notes
When I open Notes page
And I switch to "Location in Course" view
Then I see 2 groups, 3 sections and 5 notes
And I see correct content in the notes and groups
And an event has fired indicating that the Location in Course view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("structure")
notes = self.notes_page.notes
groups = self.notes_page.chapter_groups
sections = self.notes_page.subsection_groups
self.assertEqual(len(notes), 5)
self.assertEqual(len(groups), 2)
self.assertEqual(len(sections), 3)
self.assertChapterContent(
groups[0],
title=u"Test Section 1",
subtitles=[u"Test Subsection 1", u"Test Subsection 2"]
)
self.assertGroupContent(
sections[0],
title=u"Test Subsection 1",
notes=[u"Fifth note", u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this text",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this text",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[2],
quote=u"Annotate this text",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertGroupContent(
sections[1],
title=u"Test Subsection 2",
notes=[u"Fourth note"]
)
self.assertNoteContent(
notes[3],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertChapterContent(
groups[1],
title=u"Test Section 2",
subtitles=[u"Test Subsection 3"],
)
self.assertGroupContent(
sections[2],
title=u"Test Subsection 3",
notes=[u"First note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this text",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Location in Course')
def test_tags_view(self):
"""
Scenario: User can view all notes by associated tags.
Given I have a course with 5 notes and I am viewing the Notes page
When I switch to the "Tags" view
Then I see 4 tag groups
And I see correct content in the notes and groups
And an event has fired indicating that the Tags view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("tags")
notes = self.notes_page.notes
groups = self.notes_page.tag_groups
self.assertEqual(len(notes), 7)
self.assertEqual(len(groups), 4)
# Tag group "cool"
self.assertGroupContent(
groups[0],
title=u"cool (2)",
notes=[u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this text",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this text",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "review"
self.assertGroupContent(
groups[1],
title=u"review (2)",
notes=[u"Fourth note", None]
)
self.assertNoteContent(
notes[2],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this text",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "todo"
self.assertGroupContent(
groups[2],
title=u"todo (1)",
notes=["Third note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this text",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
# Notes with no tags
self.assertGroupContent(
groups[3],
title=u"[no tags] (2)",
notes=["Fifth note", "First note"]
)
self.assertNoteContent(
notes[5],
quote=u"Annotate this text",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[6],
quote=u"Annotate this text",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Tags')
def test_easy_access_from_notes_page(self):
"""
Scenario: Ensure that the link to the Unit works correctly.
Given I have a course with 5 notes
When I open Notes page
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Location in Course" view
And I click on the second unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Tags" view
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I run the search with "Fifth" query
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
"""
def assert_page(note, usage_id, view):
""" Verify that clicking on the unit link works properly. """
quote = note.quote
note.go_to_unit()
self.courseware_page.wait_for_page()
self.assertIn(quote, self.courseware_page.xblock_component_html_content())
self.assert_unit_link_event(usage_id, view)
self.reset_event_tracking()
self._add_default_notes()
self.notes_page.visit()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Recent Activity")
self.notes_page.visit().switch_to_tab("structure")
note = self.notes_page.notes[1]
assert_page(note, self.raw_note_list[2]['usage_id'], "Location in Course")
self.notes_page.visit().switch_to_tab("tags")
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[2]['usage_id'], "Tags")
self.notes_page.visit().search("Fifth")
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Search Results")
def test_search_behaves_correctly(self):
"""
Scenario: Searching behaves correctly.
Given I have a course with 5 notes
        When I open the Notes page
When I run the search with " " query
Then I see the following error message "Please enter a term in the search field."
And I do not see "Search Results" tab
When I run the search with "note" query
Then I see that error message disappears
And I see that "Search Results" tab appears with 4 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
"""
self._add_default_notes()
self.notes_page.visit()
# Run the search with whitespaces only
self.notes_page.search(" ")
# Displays error message
self.assertTrue(self.notes_page.is_error_visible)
self.assertEqual(self.notes_page.error_text, u"Please enter a term in the search field.")
# Search results tab does not appear
self.assertNotIn(u"Search Results", self.notes_page.tabs)
# Run the search with correct query
self.notes_page.search("note")
# Error message disappears
self.assertFalse(self.notes_page.is_error_visible)
self.assertIn(u"Search Results", self.notes_page.tabs)
notes = self.notes_page.notes
self.assertEqual(len(notes), 4)
self.assertNoteContent(
notes[0],
quote=u"Annotate this text",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this text",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this text",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 4)
def test_scroll_to_tag_recent_activity(self):
"""
Scenario: Can scroll to a tag group from the Recent Activity view (default view)
Given I have a course with 5 notes and I open the Notes page
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit()
self._scroll_to_tag_and_verify("pear", 3)
def test_scroll_to_tag_course_structure(self):
"""
Scenario: Can scroll to a tag group from the Course Structure view
Given I have a course with 5 notes and I open the Notes page and select the Course Structure view
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("structure")
self._scroll_to_tag_and_verify("squash", 5)
def test_scroll_to_tag_search(self):
"""
Scenario: Can scroll to a tag group from the Search Results view
Given I have a course with 5 notes and I open the Notes page and perform a search
        Then the Search Results view tab opens and gets focus
And when I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().search("note")
self._scroll_to_tag_and_verify("pumpkin", 4)
def test_scroll_to_tag_from_tag_view(self):
"""
Scenario: Can scroll to a tag group from the Tags view
        Given I have a course with 5 notes and I open the Notes page and select the Tags view
When I click on a tag associated with a note
Then I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("tags")
self._scroll_to_tag_and_verify("kiwi", 2)
    def _scroll_to_tag_and_verify(self, tag_name, group_index):
        """ Helper method for all scroll-to-tag tests. """
        self.notes_page.notes[1].go_to_tag(tag_name)
        # Because all the tagged notes share the same tags, the tag groups
        # end up ordered alphabetically.
        tag_group = self.notes_page.tag_groups[group_index]
        self.assertEqual(tag_name + " (3)", tag_group.title)
        self.assertTrue(tag_group.scrolled_to_top(group_index))
def test_tabs_behaves_correctly(self):
"""
        Scenario: Tabs behave correctly.
        Given I have a course with 5 notes
        When I open the Notes page
        Then I see only "Recent Activity", "Location in Course", and "Tags" tabs
        When I run the search with "note" query
        Then I see that "Search Results" tab appears with 4 notes found
        When I switch to "Recent Activity" tab
        Then I see all 5 notes
        When I switch to "Location in Course" tab
        Then I see 2 groups and all 5 notes
When I switch back to "Search Results" tab
Then I can still see 4 notes found
When I close "Search Results" tab
Then I see that "Recent Activity" tab becomes active
And "Search Results" tab disappears
And I see all 5 notes
"""
self._add_default_notes()
self.notes_page.visit()
# We're on Recent Activity tab.
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertEqual([u"Recent Activity", u"Location in Course", u"Tags"], self.notes_page.tabs)
self.notes_page.search("note")
# We're on Search Results tab
self.assertEqual(len(self.notes_page.tabs), 4)
self.assertIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 4)
# We can switch on Recent Activity tab and back.
self.notes_page.switch_to_tab("recent")
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("structure")
self.assertEqual(len(self.notes_page.chapter_groups), 2)
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("search")
self.assertEqual(len(self.notes_page.notes), 4)
# Can close search results page
self.notes_page.close_tab()
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertNotIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 5)
def test_open_note_when_accessed_from_notes_page(self):
"""
        Scenario: Ensure that the link to the Unit opens a note only once.
        Given I have a course with 2 sequentials that contain one note and two notes respectively
        When I open the Notes page
        And I click on the first unit link
        Then I see the note opened on the unit page
        When I switch to the second sequential
        Then I do not see any note opened
        When I switch back to the first sequential
        Then I do not see any note opened
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
self._add_notes([
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this text",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=19)],
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Second note",
quote="Annotate this text",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=19)],
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this text",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=19)],
),
])
self.notes_page.visit()
item = self.notes_page.notes[0]
item.go_to_unit()
self.courseware_page.wait_for_page()
note = self.note_unit_page.notes[0]
self.assertTrue(note.is_visible)
note = self.note_unit_page.notes[1]
self.assertFalse(note.is_visible)
self.course_nav.go_to_sequential_position(2)
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
self.course_nav.go_to_sequential_position(1)
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
@attr('shard_4')
class EdxNotesToggleSingleNoteTest(EdxNotesTestMixin):
"""
Tests for toggling single annotation.
"""
def setUp(self):
super(EdxNotesToggleSingleNoteTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_toggle_by_clicking_on_highlighted_text(self):
"""
Scenario: User can toggle a single note by clicking on highlighted text.
Given I have a course with components with notes
When I click on highlighted text
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_can_toggle_by_clicking_on_the_note(self):
"""
Scenario: User can toggle a single note by clicking on the note.
Given I have a course with components with notes
When I click on the note
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.show().click_on_viewer()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_interaction_between_notes(self):
"""
        Scenario: Interactions between notes work well.
Given I have a course with components with notes
When I click on highlighted text in the first component
And I move mouse out of the note
Then I see that the note is still shown
When I click on highlighted text in the second component
Then I see that the new note is shown
"""
note_1 = self.note_unit_page.notes[0]
note_2 = self.note_unit_page.notes[1]
note_1.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note_1.is_visible)
note_2.click_on_highlight()
self.assertFalse(note_1.is_visible)
self.assertTrue(note_2.is_visible)
@attr('shard_4')
class EdxNotesToggleNotesTest(EdxNotesTestMixin):
"""
Tests for toggling visibility of all notes.
"""
def setUp(self):
super(EdxNotesToggleNotesTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_disable_all_notes(self):
"""
Scenario: User can disable all notes.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I change sequential position to "2"
Then I still do not see any notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I do not see any notes on the subsection
"""
# Disable all notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_sequential_position(2)
self.assertEqual(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertEqual(len(self.note_unit_page.notes), 0)
def test_can_reenable_all_notes(self):
"""
Scenario: User can toggle notes visibility.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I click on "Show notes" checkbox again
Then I see that all notes appear
When I change sequential position to "2"
Then I still can see all notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I can see all notes on the subsection
"""
# Disable notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
# Enable notes to make sure that I can enable notes without refreshing
# the page.
self.note_unit_page.toggle_visibility()
self.assertGreater(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_sequential_position(2)
self.assertGreater(len(self.note_unit_page.notes), 0)
self.course_nav.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertGreater(len(self.note_unit_page.notes), 0)
| agpl-3.0 |
LouisPlisso/pytomo | pytomo/fpdf/fonts.py | 34 | 26574 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# Fonts:
fpdf_charwidths = {}
fpdf_charwidths['courier']={}
for i in xrange(0,256):
fpdf_charwidths['courier'][chr(i)]=600
fpdf_charwidths['courierB']=fpdf_charwidths['courier']
fpdf_charwidths['courierI']=fpdf_charwidths['courier']
fpdf_charwidths['courierBI']=fpdf_charwidths['courier']
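# A hedged note (not part of the original tables): FPDF-style metrics are
# expressed in 1/1000ths of the point size, so the rendered width of a
# string s is roughly sum(widths[c] for c in s) * font_size / 1000.0.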
fpdf_charwidths['helvetica']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
'\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
'\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['helveticaB']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
'\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
'\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556
}
fpdf_charwidths['helveticaBI']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
'\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
'\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556}
fpdf_charwidths['helveticaI']={
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
'\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
'\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
'\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['symbol']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':713,'#':500,'$':549,'%':833,'&':778,'\'':439,'(':333,')':333,'*':500,'+':549,
',':250,'-':549,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':549,'=':549,'>':549,'?':444,'@':549,'A':722,
'B':667,'C':722,'D':612,'E':611,'F':763,'G':603,'H':722,'I':333,'J':631,'K':722,'L':686,'M':889,'N':722,'O':722,'P':768,'Q':741,'R':556,'S':592,'T':611,'U':690,'V':439,'W':768,
'X':645,'Y':795,'Z':611,'[':333,'\\':863,']':333,'^':658,'_':500,'`':500,'a':631,'b':549,'c':549,'d':494,'e':439,'f':521,'g':411,'h':603,'i':329,'j':603,'k':549,'l':549,'m':576,
'n':521,'o':549,'p':549,'q':521,'r':549,'s':603,'t':439,'u':576,'v':713,'w':686,'x':493,'y':686,'z':494,'{':480,'|':200,'}':480,'~':549,'\x7f':0,'\x80':0,'\x81':0,'\x82':0,'\x83':0,
'\x84':0,'\x85':0,'\x86':0,'\x87':0,'\x88':0,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':0,'\x8d':0,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
'\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':750,'\xa1':620,'\xa2':247,'\xa3':549,'\xa4':167,'\xa5':713,'\xa6':500,'\xa7':753,'\xa8':753,'\xa9':753,'\xaa':753,'\xab':1042,'\xac':987,'\xad':603,'\xae':987,'\xaf':603,
'\xb0':400,'\xb1':549,'\xb2':411,'\xb3':549,'\xb4':549,'\xb5':713,'\xb6':494,'\xb7':460,'\xb8':549,'\xb9':549,'\xba':549,'\xbb':549,'\xbc':1000,'\xbd':603,'\xbe':1000,'\xbf':658,'\xc0':823,'\xc1':686,'\xc2':795,'\xc3':987,'\xc4':768,'\xc5':768,
'\xc6':823,'\xc7':768,'\xc8':768,'\xc9':713,'\xca':713,'\xcb':713,'\xcc':713,'\xcd':713,'\xce':713,'\xcf':713,'\xd0':768,'\xd1':713,'\xd2':790,'\xd3':790,'\xd4':890,'\xd5':823,'\xd6':549,'\xd7':250,'\xd8':713,'\xd9':603,'\xda':603,'\xdb':1042,
'\xdc':987,'\xdd':603,'\xde':987,'\xdf':603,'\xe0':494,'\xe1':329,'\xe2':790,'\xe3':790,'\xe4':786,'\xe5':713,'\xe6':384,'\xe7':384,'\xe8':384,'\xe9':384,'\xea':384,'\xeb':384,'\xec':494,'\xed':494,'\xee':494,'\xef':494,'\xf0':0,'\xf1':329,
'\xf2':274,'\xf3':686,'\xf4':686,'\xf5':686,'\xf6':384,'\xf7':384,'\xf8':384,'\xf9':384,'\xfa':384,'\xfb':384,'\xfc':494,'\xfd':494,'\xfe':494,'\xff':0}
fpdf_charwidths['times']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':408,'#':500,'$':500,'%':833,'&':778,'\'':180,'(':333,')':333,'*':500,'+':564,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722,
'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944,
'X':722,'Y':722,'Z':611,'[':333,'\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778,
'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':444,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':889,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':444,'\x94':444,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':980,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':200,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':564,'\xad':333,'\xae':760,'\xaf':333,
'\xb0':400,'\xb1':564,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':453,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':444,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':564,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':722,'\xde':556,'\xdf':500,'\xe0':444,'\xe1':444,'\xe2':444,'\xe3':444,'\xe4':444,'\xe5':444,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':564,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':500,'\xfe':500,'\xff':500}
fpdf_charwidths['timesB']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':555,'#':500,'$':500,'%':1000,'&':833,'\'':278,'(':333,')':333,'*':500,'+':570,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':930,'A':722,
'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':778,'I':389,'J':500,'K':778,'L':667,'M':944,'N':722,'O':778,'P':611,'Q':778,'R':722,'S':556,'T':667,'U':722,'V':722,'W':1000,
'X':722,'Y':722,'Z':667,'[':333,'\\':278,']':333,'^':581,'_':500,'`':333,'a':500,'b':556,'c':444,'d':556,'e':444,'f':333,'g':500,'h':556,'i':278,'j':333,'k':556,'l':278,'m':833,
'n':556,'o':500,'p':556,'q':556,'r':444,'s':389,'t':333,'u':556,'v':500,'w':722,'x':500,'y':500,'z':444,'{':394,'|':220,'}':394,'~':520,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':667,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':300,'\xab':500,'\xac':570,'\xad':333,'\xae':747,'\xaf':333,
'\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':556,'\xb6':540,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':330,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':570,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':722,'\xde':611,'\xdf':556,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
fpdf_charwidths['timesBI']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':389,'"':555,'#':500,'$':500,'%':833,'&':778,'\'':278,'(':333,')':333,'*':500,'+':570,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':832,'A':667,
'B':667,'C':667,'D':722,'E':667,'F':667,'G':722,'H':778,'I':389,'J':500,'K':667,'L':611,'M':889,'N':722,'O':722,'P':611,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':667,'W':889,
'X':667,'Y':611,'Z':611,'[':333,'\\':278,']':333,'^':570,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':556,'i':278,'j':278,'k':500,'l':278,'m':778,
'n':556,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':556,'v':444,'w':667,'x':500,'y':444,'z':389,'{':348,'|':220,'}':348,'~':570,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':389,'\x9f':611,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':266,'\xab':500,'\xac':606,'\xad':333,'\xae':747,'\xaf':333,
'\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':576,'\xb6':500,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':300,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
'\xc6':944,'\xc7':667,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':570,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':611,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':444,'\xfe':500,'\xff':444}
fpdf_charwidths['timesI']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':420,'#':500,'$':500,'%':833,'&':778,'\'':214,'(':333,')':333,'*':500,'+':675,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':675,'=':675,'>':675,'?':500,'@':920,'A':611,
'B':611,'C':667,'D':722,'E':611,'F':611,'G':722,'H':722,'I':333,'J':444,'K':667,'L':556,'M':833,'N':667,'O':722,'P':611,'Q':722,'R':611,'S':500,'T':556,'U':722,'V':611,'W':833,
'X':611,'Y':556,'Z':556,'[':389,'\\':278,']':389,'^':422,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':278,'g':500,'h':500,'i':278,'j':278,'k':444,'l':278,'m':722,
'n':500,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':500,'v':444,'w':667,'x':444,'y':444,'z':389,'{':400,'|':275,'}':400,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':556,'\x85':889,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':500,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':556,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':556,'\x94':556,'\x95':350,'\x96':500,'\x97':889,'\x98':333,'\x99':980,
'\x9a':389,'\x9b':333,'\x9c':667,'\x9d':350,'\x9e':389,'\x9f':556,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':275,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':675,'\xad':333,'\xae':760,'\xaf':333,
'\xb0':400,'\xb1':675,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':523,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':611,'\xc1':611,'\xc2':611,'\xc3':611,'\xc4':611,'\xc5':611,
'\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':667,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':675,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':556,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':675,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':444,'\xfe':500,'\xff':444}
fpdf_charwidths['zapfdingbats']={
'\x00':0,'\x01':0,'\x02':0,'\x03':0,'\x04':0,'\x05':0,'\x06':0,'\x07':0,'\x08':0,'\t':0,'\n':0,'\x0b':0,'\x0c':0,'\r':0,'\x0e':0,'\x0f':0,'\x10':0,'\x11':0,'\x12':0,'\x13':0,'\x14':0,'\x15':0,
'\x16':0,'\x17':0,'\x18':0,'\x19':0,'\x1a':0,'\x1b':0,'\x1c':0,'\x1d':0,'\x1e':0,'\x1f':0,' ':278,'!':974,'"':961,'#':974,'$':980,'%':719,'&':789,'\'':790,'(':791,')':690,'*':960,'+':939,
',':549,'-':855,'.':911,'/':933,'0':911,'1':945,'2':974,'3':755,'4':846,'5':762,'6':761,'7':571,'8':677,'9':763,':':760,';':759,'<':754,'=':494,'>':552,'?':537,'@':577,'A':692,
'B':786,'C':788,'D':788,'E':790,'F':793,'G':794,'H':816,'I':823,'J':789,'K':841,'L':823,'M':833,'N':816,'O':831,'P':923,'Q':744,'R':723,'S':749,'T':790,'U':792,'V':695,'W':776,
'X':768,'Y':792,'Z':759,'[':707,'\\':708,']':682,'^':701,'_':826,'`':815,'a':789,'b':789,'c':707,'d':687,'e':696,'f':689,'g':786,'h':787,'i':713,'j':791,'k':785,'l':791,'m':873,
'n':761,'o':762,'p':762,'q':759,'r':759,'s':892,'t':892,'u':788,'v':784,'w':438,'x':138,'y':277,'z':415,'{':392,'|':392,'}':668,'~':668,'\x7f':0,'\x80':390,'\x81':390,'\x82':317,'\x83':317,
'\x84':276,'\x85':276,'\x86':509,'\x87':509,'\x88':410,'\x89':410,'\x8a':234,'\x8b':234,'\x8c':334,'\x8d':334,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
'\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':0,'\xa1':732,'\xa2':544,'\xa3':544,'\xa4':910,'\xa5':667,'\xa6':760,'\xa7':760,'\xa8':776,'\xa9':595,'\xaa':694,'\xab':626,'\xac':788,'\xad':788,'\xae':788,'\xaf':788,
'\xb0':788,'\xb1':788,'\xb2':788,'\xb3':788,'\xb4':788,'\xb5':788,'\xb6':788,'\xb7':788,'\xb8':788,'\xb9':788,'\xba':788,'\xbb':788,'\xbc':788,'\xbd':788,'\xbe':788,'\xbf':788,'\xc0':788,'\xc1':788,'\xc2':788,'\xc3':788,'\xc4':788,'\xc5':788,
'\xc6':788,'\xc7':788,'\xc8':788,'\xc9':788,'\xca':788,'\xcb':788,'\xcc':788,'\xcd':788,'\xce':788,'\xcf':788,'\xd0':788,'\xd1':788,'\xd2':788,'\xd3':788,'\xd4':894,'\xd5':838,'\xd6':1016,'\xd7':458,'\xd8':748,'\xd9':924,'\xda':748,'\xdb':918,
'\xdc':927,'\xdd':928,'\xde':928,'\xdf':834,'\xe0':873,'\xe1':828,'\xe2':924,'\xe3':924,'\xe4':917,'\xe5':930,'\xe6':931,'\xe7':463,'\xe8':883,'\xe9':836,'\xea':836,'\xeb':867,'\xec':867,'\xed':696,'\xee':696,'\xef':874,'\xf0':0,'\xf1':874,
'\xf2':760,'\xf3':946,'\xf4':771,'\xf5':865,'\xf6':771,'\xf7':888,'\xf8':967,'\xf9':888,'\xfa':831,'\xfb':873,'\xfc':927,'\xfd':970,'\xfe':918,'\xff':0}
| gpl-2.0 |
BaconPancakes/valor | lib/pip/_vendor/colorama/ansitowin32.py | 450 | 9668 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
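# Illustrative behaviour of StreamWrapper above (a hedged note): attribute
# lookups such as proxy.encoding or proxy.isatty() fall through to the
# wrapped stream via __getattr__, while proxy.write(text) is routed to the
# converter's write() implemented by AnsiToWin32 below.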
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
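    # Illustrative matches (a hedged note, not from the original source):
    #   '\x1b[31;1m'          -> CSI, paramstring '31;1', command 'm'
    #   '\x1b]2;My Title\x07' -> OSC, paramstring '2;My Title'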
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
        # should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
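    # Worked examples for extract_params below (hedged, derived from its
    # logic): ('H', '') -> (1, 1); ('H', '5;10') -> (5, 10); ('m', '') ->
    # (0,), i.e. ESC[m behaves as a full SGR reset.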
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
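# A minimal usage sketch (hedged; colorama's init() normally wraps the
# standard streams for you, this only shows the mechanics):
#
#     wrapper = AnsiToWin32(sys.stdout)
#     stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
#     stream.write('\033[31mred text\033[0m\n')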
| gpl-3.0 |
hypnotika/namebench | libnamebench/site_connector.py | 175 | 4048 | # Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class used for connecting to the results site."""
import os
import platform
import random
import socket
import sys
import tempfile
import time
import urllib
import zlib
# third_party
import httplib2
import simplejson
import util
RETRY_WAIT = 10
class SiteConnector(object):
"""Methods that connect to the results site."""
def __init__(self, config, status_callback=None):
self.config = config
self.url = self.config.site_url.rstrip('/')
self.status_callback = status_callback
def msg(self, msg, count=None, total=None, **kwargs):
if self.status_callback:
self.status_callback(msg, count=count, total=total, **kwargs)
else:
print '%s [%s/%s]' % (msg, count, total)
def GetIndexHosts(self):
"""Get a list of 'index' hosts for standardized testing."""
url = self.url + '/index_hosts'
h = httplib2.Http(tempfile.gettempdir(), timeout=10)
content = None
try:
unused_resp, content = h.request(url, 'GET')
hosts = []
for record_type, host in simplejson.loads(content):
hosts.append((str(record_type), str(host)))
return hosts
except simplejson.decoder.JSONDecodeError:
self.msg('Failed to decode: "%s"' % content)
return []
except AttributeError:
self.msg('%s refused connection' % url)
return []
except:
self.msg('* Failed to fetch %s: %s' % (url, util.GetLastExceptionString()))
return []
def UploadJsonResults(self, json_data, hide_results=False, fail_quickly=False):
"""Data is generated by reporter.CreateJsonData."""
url = self.url + '/submit'
if not url or not url.startswith('http'):
return (False, 'error')
h = httplib2.Http()
post_data = {
'client_id': self._CalculateDuplicateCheckId(),
'submit_id': random.randint(0, 2**32),
'hidden': bool(hide_results),
'data': json_data
}
try:
resp, content = h.request(url, 'POST', urllib.urlencode(post_data))
try:
data = simplejson.loads(content)
for note in data['notes']:
print ' * %s' % note
return (''.join((self.url, data['url'])), data['state'])
except:
self.msg('BAD RESPONSE from %s: [%s]:\n %s' % (url, resp, content))
print "DATA:"
print post_data
# See http://code.google.com/p/httplib2/issues/detail?id=62
except AttributeError:
self.msg('%s refused connection' % url)
except:
self.msg('Error uploading results: %s' % util.GetLastExceptionString())
# We haven't returned, something is up.
if not fail_quickly:
self.msg('Problem talking to %s, will retry after %ss' % (url, RETRY_WAIT))
time.sleep(RETRY_WAIT)
self.UploadJsonResults(json_data, hide_results=hide_results, fail_quickly=True)
return (False, 'error')
def _CalculateDuplicateCheckId(self):
"""This is so that we can detect duplicate submissions from a particular host.
Returns:
checksum: integer
"""
# From http://docs.python.org/release/2.5.2/lib/module-zlib.html
# "not suitable for use as a general hash algorithm."
#
# We are only using it as a temporary way to detect duplicate runs on the
    # same host in a short time period, so its accuracy is not important.
return zlib.crc32(platform.platform() + sys.version + platform.node() +
os.getenv('HOME', '') + os.getenv('USERPROFILE', ''))
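# Illustrative consequence (hedged): because the checksum is built from
# platform, interpreter and home-directory strings, repeated runs on the
# same machine produce the same client_id, letting the results site
# collapse duplicate submissions.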
| apache-2.0 |
nanobox-io/nanobox-pkgsrc-base | nodejs7/patches/patch-tools_gyp_pylib_gyp_generator_make.py | 16 | 1181 | $NetBSD: patch-tools_gyp_pylib_gyp_generator_make.py,v 1.3 2013/12/12 11:52:37 jperkin Exp $
Add support for NetBSD and DragonFly.
Ensure we use the system libtool on OSX.
--- tools/gyp/pylib/gyp/generator/make.py.orig 2013-12-12 05:20:06.000000000 +0000
+++ tools/gyp/pylib/gyp/generator/make.py
@@ -174,7 +174,7 @@ cmd_solink_module = $(LINK.$(TOOLSET)) -
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
@@ -2012,7 +2012,7 @@ def GenerateOutput(target_list, target_d
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
- elif flavor == 'freebsd':
+ elif flavor == 'freebsd' or flavor == 'dragonflybsd' or flavor == 'netbsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
| mit |
stanlee321/pysolper | latrop/lib/dist/tipfy/template.py | 9 | 21622 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like:
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
Loader is a class that loads templates from a root directory and caches
the compiled templates:
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates:
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. if and for blocks get
translated exactly into Python, so you can do complex expressions like:
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the escape() function in the examples above. You can pass
functions in to your template just like any other variable:
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions escape(), url_escape(), json_encode(), and squeeze()
to all templates by default.
"""
from __future__ import with_statement
import cStringIO
import datetime
import htmlentitydefs
import logging
import os.path
import re
import urllib
import xml.sax.saxutils
import zipfile
from .json import json_encode
def utf8(value):
"""Encodes a unicode value to UTF-8 if not yet encoded.
:param value:
Value to be encoded.
:returns:
An encoded string.
"""
if isinstance(value, unicode):
return value.encode("utf-8")
assert isinstance(value, str)
return value
def _unicode(value):
"""Encodes a string value to unicode if not yet decoded.
:param value:
Value to be decoded.
:returns:
A decoded string.
"""
if isinstance(value, str):
return value.decode("utf-8")
assert isinstance(value, unicode)
return value
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
:param value:
The value to be escaped.
:returns:
The escaped value.
"""
    return utf8(xml.sax.saxutils.escape(value, {'"': "&quot;"}))
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string.
:param value:
The value to be un-escaped.
:returns:
The un-escaped value.
"""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value):
"""Returns a valid URL-encoded version of the given value."""
return urllib.quote_plus(utf8(value))
def _convert_entity(m):
if m.group(1) == "#":
try:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
return dict((name, unichr(value)) for \
name, value in htmlentitydefs.name2codepoint.iteritems())
_HTML_UNICODE_MAP = _build_unicode_map()
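# Illustrative entries (hedged): _HTML_UNICODE_MAP['amp'] == u'&' and
# _HTML_UNICODE_MAP['gt'] == u'>', which is what lets xhtml_unescape turn
# '&amp;' back into '&'.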
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
reader = _TemplateReader(name, template_string)
self.file = _File(_parse(reader))
self.code = self._generate_python(loader, compress_whitespace)
try:
self.compiled = compile(self.code, self.name, "exec")
except:
formatted_code = _format_code(self.code).rstrip()
logging.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": xhtml_escape,
"url_escape": url_escape,
"json_encode": json_encode,
"squeeze": squeeze,
"datetime": datetime,
}
namespace.update(kwargs)
exec self.compiled in namespace
execute = namespace["_execute"]
try:
return execute()
except:
formatted_code = _format_code(self.code).rstrip()
logging.error("%s code:\n%s", self.name, formatted_code)
raise
def _generate_python(self, loader, compress_whitespace):
buffer = cStringIO.StringIO()
try:
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
self.file.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, self,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
class Loader(object):
"""A template loader that loads from a single root directory.
You must use a template loader to use template constructs like
{% extends %} and {% include %}. Loader caches all templates after
they are loaded the first time.
"""
def __init__(self, root_directory):
self.root = os.path.abspath(root_directory)
self.templates = {}
def reset(self):
self.templates = {}
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def load(self, name, parent_path=None):
name = self.resolve_path(name, parent_path=parent_path)
if name not in self.templates:
path = os.path.join(self.root, name)
f = open(path, "r")
self.templates[name] = Template(f.read(), name=name, loader=self)
f.close()
return self.templates[name]
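# Usage sketch with a hypothetical file layout: given templates/base.html
# containing "{% block content %}{% end %}" and templates/page.html starting
# with '{% extends "base.html" %}', the loader wires up the inheritance:
#
#     loader = Loader("templates")
#     print loader.load("page.html").generate()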
class ZipLoader(Loader):
"""A template loader that loads from a zip file and a root directory.
You must use a template loader to use template constructs like
{% extends %} and {% include %}. Loader caches all templates after
they are loaded the first time.
"""
def __init__(self, zip_path, root_directory):
self.zipfile = zipfile.ZipFile(zip_path, 'r')
self.root = os.path.join(root_directory)
self.templates = {}
def load(self, name, parent_path=None):
name = self.resolve_path(name, parent_path=parent_path)
if name not in self.templates:
path = os.path.join(self.root, name)
tpl = self.zipfile.read(path)
self.templates[name] = Template(tpl, name=name, loader=self)
return self.templates[name]
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, body):
self.body = body
def generate(self, writer):
writer.write_line("def _execute():")
with writer.indent():
writer.write_line("_buffer = []")
self.body.generate(writer)
writer.write_line("return ''.join(_buffer)")
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body=None):
self.name = name
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.named_blocks[self.name].generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self.body
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader):
self.name = name
self.template_name = reader.name
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
old = writer.current_template
writer.current_template = included
included.file.body.generate(writer)
writer.current_template = old
class _ApplyBlock(_Node):
def __init__(self, method, body=None):
self.method = method
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name)
with writer.indent():
writer.write_line("_buffer = []")
self.body.generate(writer)
writer.write_line("return ''.join(_buffer)")
writer.write_line("_buffer.append(%s(%s()))" % (
self.method, method_name))
class _ControlBlock(_Node):
def __init__(self, statement, body=None):
self.statement = statement
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement)
with writer.indent():
self.body.generate(writer)
class _IntermediateControlBlock(_Node):
def __init__(self, statement):
self.statement = statement
def generate(self, writer):
writer.write_line("%s:" % self.statement, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement):
self.statement = statement
def generate(self, writer):
writer.write_line(self.statement)
class _Expression(_Node):
def __init__(self, expression):
self.expression = expression
def generate(self, writer):
writer.write_line("_tmp = %s" % self.expression)
writer.write_line("if isinstance(_tmp, str): _buffer.append(_tmp)")
writer.write_line("elif isinstance(_tmp, unicode): "
"_buffer.append(_tmp.encode('utf-8'))")
writer.write_line("else: _buffer.append(str(_tmp))")
class _Text(_Node):
def __init__(self, value):
self.value = value
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_buffer.append(%r)' % value)
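# For example, with compress_whitespace enabled the two substitutions above
# turn "a \t  b\n   c" into "a b\nc": runs of tabs and spaces collapse to a
# single space, and whitespace around a newline collapses to the newline.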
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self._indent = 0
def indent(self):
return self
def indent_size(self):
return self._indent
def __enter__(self):
self._indent += 1
return self
def __exit__(self, *args):
assert self._indent > 0
self._indent -= 1
def write_line(self, line, indent=None):
        if indent is None:
indent = self._indent
for i in xrange(indent):
self.file.write(" ")
print >> self.file, line
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 0
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, in_block=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume()))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
body.chunks.append(_Text(reader.consume(curly)))
start_brace = reader.consume(2)
line = reader.line
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1 or reader.find("\n", 0, end) != -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1 or reader.find("\n", 0, end) != -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % \
(operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "comment"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % \
line)
block = _ExtendsBlock(suffix)
elif operator == "import":
if not suffix:
raise ParseError("import missing statement on line %d" % \
line)
block = _Statement(contents)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % \
line)
block = _IncludeBlock(suffix, reader)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
block_body = _parse(reader, operator)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % \
line)
block = _ApplyBlock(suffix, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body)
else:
block = _ControlBlock(contents, block_body)
body.chunks.append(block)
continue
else:
raise ParseError("unknown operator: %r" % operator)
| apache-2.0 |
loulich/Couchpotato | libs/requests/packages/urllib3/request.py | 853 | 5751 | try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
    Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
raise NotImplemented("Classes extending RequestMethods must implement "
"their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
        safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic browser behavior.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
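# Usage sketch (assumes the full urllib3 package, where PoolManager mixes in
# RequestMethods, so request() picks URL vs. body encoding from the method):
#
#     import urllib3
#     http = urllib3.PoolManager()
#     http.request('GET', 'http://example.com/', fields={'q': 'pepper'})
#     http.request('POST', 'http://example.com/upload',
#                  fields={'file': ('report.txt', 'contents of report')})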
| gpl-3.0 |
amirrpp/django-oscar | tests/unit/wishlist_tests.py | 69 | 1388 | from django.test import TestCase
from oscar.apps.wishlists.models import WishList
from oscar.core.compat import get_user_model
User = get_user_model()
class TestAWishlist(TestCase):
def test_can_generate_a_random_key(self):
key = WishList.random_key(6)
self.assertTrue(len(key) == 6)
class TestAPublicWishList(TestCase):
def setUp(self):
self.wishlist = WishList(visibility=WishList.PUBLIC)
def test_is_visible_to_anyone(self):
user = User()
self.assertTrue(self.wishlist.is_allowed_to_see(user))
class TestASharedWishList(TestCase):
def setUp(self):
self.wishlist = WishList(visibility=WishList.SHARED)
def test_is_visible_to_anyone(self):
user = User()
self.assertTrue(self.wishlist.is_allowed_to_see(user))
class TestAPrivateWishList(TestCase):
def setUp(self):
self.owner = User(id=1)
self.another_user = User(id=2)
self.wishlist = WishList(owner=self.owner)
def test_is_visible_only_to_its_owner(self):
self.assertTrue(self.wishlist.is_allowed_to_see(self.owner))
self.assertFalse(self.wishlist.is_allowed_to_see(self.another_user))
def test_can_only_be_edited_by_its_owner(self):
self.assertTrue(self.wishlist.is_allowed_to_edit(self.owner))
self.assertFalse(self.wishlist.is_allowed_to_edit(self.another_user))
| bsd-3-clause |
yzl0083/orange | Orange/OrangeCanvas/scheme/tests/test_annotations.py | 26 | 1426 | """
Tests for scheme annotations.
"""
from ...gui import test
from .. import SchemeArrowAnnotation, SchemeTextAnnotation
class TestAnnotations(test.QCoreAppTestCase):
def test_arrow(self):
arrow = SchemeArrowAnnotation((0, 0), (10, 10))
self.assertTrue(arrow.start_pos == (0, 0))
self.assertTrue(arrow.end_pos == (10, 10))
def count():
count.i += 1
count.i = 0
arrow.geometry_changed.connect(count)
arrow.set_line((10, 10), (0, 0))
self.assertTrue(arrow.start_pos == (10, 10))
self.assertTrue(arrow.end_pos == (0, 0))
self.assertTrue(count.i == 1)
def test_text(self):
text = SchemeTextAnnotation((0, 0, 10, 100), "--")
self.assertEqual(text.rect, (0, 0, 10, 100))
self.assertEqual(text.text, "--")
def count():
count.i += 1
count.i = 0
text.geometry_changed.connect(count)
text.set_rect((9, 9, 30, 30))
self.assertEqual(text.rect, (9, 9, 30, 30))
        self.assertTrue(count.i == 1)
        text.rect = (4, 4, 4, 4)
        self.assertTrue(count.i == 2)
count.i = 0
text.text_changed.connect(count)
text.set_text("...")
self.assertEqual(text.text, "...")
self.assertTrue(count.i == 1)
text.text = '=='
self.assertEqual(text.text, "--")
self.assertTrue(count.i == 2)
| gpl-3.0 |
devosoft/Pepper | tests/preprocessor_test.py | 1 | 14658 | # This file is a part of the Pepper project, https://github.com/devosoft/Pepper
# (C) Michigan State University, under the MIT License
# See LICENSE.txt for more information
from bunch import Bunch
from pathlib import Path
from unittest.mock import MagicMock
import os
import pytest
import shutil
import subprocess
import pepper.symbol_table as symtable
import pepper.abstract_symbol_tree as ast
import pepper.preprocessor as preprocessor
SOURCE_FILE_DIRECTORY = "./tests/test_data/"
EXAMPLE_OUTPUT_DIRECTORY = "./tests/test_data/output_examples/"
class FakeFile():
def __init__(self, name, contents=None):
self.name = name
self.contents = contents if contents else []
self.index = 0
def readline(self):
if self.index >= len(self.contents):
return ""
else:
self.index += 1
return self.contents[self.index-1]
def close(self):
pass
def write(self, lines):
self.contents.extend(lines.split("\n"))
def get_contents(self):
return "\n".join(self.contents)
def name(self):
return self.name
class FakeArgs():
def __init__(self):
self.input_file = None
self.output_file = None
self.trigger_internal_error = False
self.sys_include = False
self.debug = True
def preprocess_and_compare_functionally(source, reference, prebuilt_args_object=None):
args = None
if prebuilt_args_object:
args = prebuilt_args_object
else:
args = FakeArgs()
if args.input_file is None:
fake_input_file = None
with open(SOURCE_FILE_DIRECTORY + source, 'r') as sourcefile:
fake_input_file = FakeFile(f"{SOURCE_FILE_DIRECTORY}{source}", sourcefile.readlines())
args.input_file = fake_input_file
fake_output_file = FakeFile(f"{source}.fake_output")
args.output_file = fake_output_file
preprocessor.main(args)
if isinstance(reference, FakeFile):
assert(args.output_file.contents == reference.contents)
else:
with open(EXAMPLE_OUTPUT_DIRECTORY + reference) as reference_file:
assert(args.output_file.get_contents() == reference_file.read())
def reset_state():
symtable.TABLE = dict()
symtable.FILE_STACK = []
symtable.IFDEF_STACK = []
symtable.SYSTEM_INCLUDE_PATHS = []
symtable.EXPANDED_MACRO = False
symtable.TRIGGER_INTERNAL_ERROR = False
symtable.IF_COUNT = 0
symtable.IGNORED_FILE_PATHS = set()
def preprocess_and_compare(source, reference, tmpdir, supportfiles=[], optional_args=[]):
test_dir = tmpdir.mkdir('preprocessor')
# copy the test file to the test directory
shutil.copy(SOURCE_FILE_DIRECTORY + source, test_dir.realpath())
for entry in supportfiles:
shutil.copy(SOURCE_FILE_DIRECTORY + entry, test_dir.realpath())
call = ["Pepper"] + optional_args + [f"{test_dir.realpath()}/{source}"]
process = subprocess.run(call, timeout=2, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert(process.returncode == 0)
with open(f'{EXAMPLE_OUTPUT_DIRECTORY}{reference}', 'r') as expected_file:
with open(f"{test_dir.realpath()}/{source}.preprocessed.cc") as outfile:
assert(outfile.read() == expected_file.read())
assert(not process.stderr)
class TestUnit:
def setup_method(self, method):
reset_state()
def test_comments(self, tmpdir):
preprocess_and_compare_functionally('comments.cpp', 'comments.cpp.preprocessed.cc')
def test_nested_macro_expansion(self, tmpdir):
preprocess_and_compare_functionally('multiple_macros.cpp',
'multiple_macros.cpp.preprocessed.cc')
def test_function_and_macro_calls(self, tmpdir):
preprocess_and_compare_functionally('function_and_macro_calls.cpp',
'function_and_macro_calls.cpp.preprocessed.cc')
def test_function_and_macro_calls_2(self, tmpdir):
preprocess_and_compare_functionally('function_like_macro_2.cpp',
'function_like_macro_2.cpp.preprocessed.cc')
def test_basic_function_with_defaults_refactored(self, tmpdir):
preprocess_and_compare("file_include.cpp",
"preprocessed_file_include.cpp",
tmpdir,
['SomeFile.h', 'SomeOtherFile.h'])
def test_ifdef_handling(self, tmpdir):
preprocess_and_compare_functionally('ifdef.cpp', 'ifdef.cpp.preprocessed.cc')
def test_for_loop_not_breaking_macros(self, tmpdir):
preprocess_and_compare_functionally("for_loop.cpp", "for_loop.cpp.preprocessed.cc")
def test_variadic_macro_expansion(self, tmpdir):
ifile_contents = [
"#define somemacro(a, b, moar...) a + b + mult(moar)\n",
"int main {\n",
" cout << somemacro(1, 2, 3, 4, 5, 6) << endl;\n",
"}",
]
expected_out = [
"// Macro somemacro with args ['a', 'b', 'moar...'] expanding to 'a + b + mult(moar)'", # NOQA
"int main {",
" cout << 1 + 2 + mult(3, 4, 5, 6) << endl;",
"}",
"",
]
args = FakeArgs()
args.input_file = FakeFile('variadic_expand.cc', ifile_contents)
expected_out_file = FakeFile('whatever', expected_out)
preprocess_and_compare_functionally(None, expected_out_file, args)
def test_system_file_include(self, tmpdir):
system_dir = tmpdir.mkdir('system_include_path')
args = FakeArgs()
args.sys_include = [system_dir.realpath()]
# copy some files to the tmpdir, then run search for them
shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeFile.h', f"{system_dir.realpath()}/SomeFile.h")
shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeOtherFile.h',
f"{system_dir.realpath()}/SomeOtherFile.h")
preprocess_and_compare_functionally('systemish_include.cpp',
'systemish_include.cpp.preprocessed.cc',
args)
def test_include_path_search(self, tmpdir):
# copy some files to the tmpdir, then run search for them
test_dir = tmpdir.mkdir('include_path')
shutil.copy(SOURCE_FILE_DIRECTORY + 'SomeFile.h', test_dir.realpath())
symtable.SYSTEM_INCLUDE_PATHS.append(str(test_dir.realpath()))
found = ast.PreprocessorIncludeNode.search_system_includes('SomeFile.h')
expected = str(Path(f"{test_dir.realpath()}/{'SomeFile.h'}"))
assert(found and (found == expected))
try:
found = ast.PreprocessorIncludeNode.search_system_includes('FileThatDoesNotExist.h')
assert(False and "There should have been an OSError!")
except OSError as err:
assert("Could not find file FileThatDoesNotExist.h in defined system include paths:" in str(err)) # NOQA
def test_error_raised_for_bad_syntax(self, tmpdir):
test_dir = tmpdir.mkdir('preprocessor')
# copy the test file to the test directory
shutil.copy(SOURCE_FILE_DIRECTORY + "error.cpp", test_dir.realpath())
exception_raised = False
try:
# doesn't actually matter what the reference is
preprocess_and_compare_functionally('error.cpp', 'preprocessed_file_include.cpp')
assert(False and "Should have had an exception thrown!")
except symtable.PepperSyntaxError as err:
exception_raised = True
assert(exception_raised)
def test_internal_error_handling(self, tmpdir):
args = FakeArgs()
args.trigger_internal_error = True
exception_raised = False
try:
preprocess_and_compare_functionally('function_like_macro_2.cpp',
'function_like_macro_2.cpp.preprocessed.cc',
args)
assert(False and "Should have had an exception thrown!")
except symtable.PepperInternalError as err:
exception_raised = True
assert(exception_raised)
def test_if_basic_expressions(self, tmpdir):
preprocess_and_compare_functionally('if_expressions.cpp',
'if_expressions.cpp.preprocessed.cc')
def test_if_macro_calls(self, tmpdir):
preprocess_and_compare_functionally('if_macro_expressions.cpp',
'if_macro_expressions.cpp.preprocessed.cc')
def test_if_with_file_includes(self, tmpdir):
preprocess_and_compare("file_include_if.cpp", "file_include_if.preprocessed.cc",
tmpdir,
['SomeFile.h', 'SomeOtherFile.h'])
def test_error_raised_if_token_syntax(self, tmpdir):
in_contents = [
"#define M1(a,b) a + b\n",
"#if M1(12.2, 12.1 *0.23)\n",
"#endif"
]
expected = [""]
args = FakeArgs()
args.input_file = FakeFile("type_error.cc", in_contents)
expected_file = FakeFile("whatever", expected)
exception_raised = False
try:
# doesn't actually matter what the reference is
preprocess_and_compare_functionally(None, expected_file, args)
assert(False and "Should have had an exception thrown!")
except symtable.PepperSyntaxError as err:
exception_raised = True
assert(exception_raised)
def test_error_raised_macro_eval_syntax(self, tmpdir):
in_contents = [
"#define M1(a,b) a and or and b\n",
"#if M1(1, 2)\n",
"#endif"
]
expected = [""]
args = FakeArgs()
args.input_file = FakeFile("macro_error.cc", in_contents)
expected_file = FakeFile("whatever", expected)
exception_raised = False
try:
# doesn't actually matter what the reference is
preprocess_and_compare_functionally(None, expected_file, args)
assert(False and "Should have had an exception thrown!")
except symtable.PepperSyntaxError as err:
exception_raised = True
assert(exception_raised)
def test_error_directive_raised(self, tmpdir):
in_contents = [
"#ifndef __M1__\n",
'#error "This constant should be present!"\n',
"#endif"
]
expected = [""]
args = FakeArgs()
args.input_file = FakeFile("macro_error.cc", in_contents)
expected_file = FakeFile("whatever", expected)
exception_raised = False
try:
# doesn't actually matter what the reference is
preprocess_and_compare_functionally(None, expected_file, args)
assert(False and "Should have had an exception thrown!")
except ast.PreprocessorErrorNode.PepperCompileError as err:
exception_raised = True
assert(exception_raised)
def test_error_directive_not_raised(self, tmpdir):
in_contents = [
"#ifdef __M1__\n",
'#error "This constant shouldnt be present!"\n',
"#endif"
]
expected = ["// endif expression ", ""]
args = FakeArgs()
args.input_file = FakeFile("macro_error.cc", in_contents)
expected_file = FakeFile("whatever", expected)
preprocess_and_compare_functionally(None, expected_file, args)
def test_warning_directive_raised(self, tmpdir):
test_dir = tmpdir.mkdir('preprocessor')
source = "warning.cpp"
reference = source + ".preprocessed.cc"
shutil.copy(SOURCE_FILE_DIRECTORY + source, test_dir.realpath())
call = ["Pepper"] + [f"{test_dir.realpath()}/{source}"]
process = subprocess.run(call, timeout=2, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert(process.returncode == 0)
with open(f'{EXAMPLE_OUTPUT_DIRECTORY}{reference}', 'r') as expected_file:
with open(f"{test_dir.realpath()}/{source}.preprocessed.cc") as outfile:
assert(outfile.read() == expected_file.read())
assert(process.stderr ==
b'\nwarning.cpp:4 warning: "WARN"\n\nwarning.cpp:8 warning: "WARN"\n')
def test_warning_directive_not_raised(self, tmpdir):
preprocess_and_compare("no_warning.cpp", "no_warning.cpp.preprocessed.cc",
tmpdir)
def test_pragma_once_handler(self, tmpdir):
assert "once" in symtable.PRAGMA_HANDLERS
        # should technically be opened with mode 'r', but the file doesn't exist yet, so 'w' creates it
symtable.FILE_STACK.append(open('./SomeFile.h', 'w'))
symtable.PRAGMA_HANDLERS["once"]()
assert "./SomeFile.h" in symtable.IGNORED_FILE_PATHS
symtable.FILE_STACK = [Bunch(name="BaseFile.h")]
include_node = ast.PreprocessorIncludeNode(["'SomeFile.h'"], False)
include_node.preprocess()
assert len(symtable.FILE_STACK) == 1
# Teardown
os.remove('./SomeFile.h')
def test_pragma_with_arguments(self, tmpdir):
mock_handler = MagicMock()
symtable.PRAGMA_HANDLERS['test'] = mock_handler
in_contents = [
"#pragma test ('ragnlebalrgle testing wooo')"
]
expected = ["", ""]
args = FakeArgs()
args.input_file = FakeFile("arged_pragma.cc", in_contents)
expected_file = FakeFile("whatever", expected)
preprocess_and_compare_functionally(None, expected_file, args)
assert len(mock_handler.mock_calls) == 1
def test_unknown_pragma(self, tmpdir):
in_contents = [
"#pragma unknwon ('ragnlebalrgle testing wooo')"
]
expected = ["", ""]
args = FakeArgs()
args.input_file = FakeFile("arged_pragma.cc", in_contents)
expected_file = FakeFile("whatever", expected)
with pytest.raises(symtable.PepperInternalError):
preprocess_and_compare_functionally(None, expected_file, args)
def test_pragma_preprocessor(self, tmpdir):
preprocess_and_compare("pragma_base.cc", "pragma_base.cc.preprocessed.cc",
tmpdir,
['pragma_include.h'])
class TestSystem:
def test_basic_function(self, tmpdir):
preprocess_and_compare("file_include.cpp",
"preprocessed_file_include.cpp",
tmpdir,
['SomeFile.h', 'SomeOtherFile.h'])
| mit |
knossos-project/knossos_python_tools | setup.py | 3 | 2581 | #!/usr/bin/env python
import os
import sys
import setuptools
from setuptools import find_packages, setup, Extension
from pkg_resources import parse_version
# Setuptools >=18.0 is needed for Cython to work correctly.
if parse_version(setuptools.__version__) < parse_version('18.0'):
print('\nYour installed Setuptools version is too old.')
print('Please upgrade it to at least 18.0, e.g. by running')
print('$ python{} -m pip install --upgrade setuptools'.format(sys.version_info[0]))
print('If this fails, try additionally passing the "--user" switch to the install command, or use Anaconda.')
sys.stdout.flush()
sys.exit(1)
try:
import numpy
except ImportError:
print("Numpy not found. Please install Numpy manually: http://www.scipy.org/install.html")
sys.stdout.flush()
sys.exit(1)
extensions = [Extension(
"knossos_utils.mergelist_tools",
["knossos_utils/mergelist_tools.pyx"],
include_dirs=[numpy.get_include()],
language="c++",
extra_compile_args=["-std=c++0x", "-include", "cmath"])
]
install_requires = [
"cython>=0.23",
"h5py>=2.5",
"imageio",
"numpy>=1.10",
"scipy>=0.16",
"networkx>=1.11",
"requests>=2.12",
"matplotlib",
"Pillow"
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="knossos_utils",
version="0.1",
description="Tools for generating or manipulating knossos datasets and annotation files",
author="Sven Dorkenwald, KNOSSOS team",
author_email="[email protected]",
url="https://github.com/knossos-project/knossos_utils",
license="GPL",
long_description=read("README.md"),
packages=find_packages(),
data_files=[("", ["LICENSE"])],
ext_modules=extensions,
setup_requires=[
"cython>=0.23",
],
install_requires=install_requires,
extras_require={
"snappy": ["python-snappy>=0.5"],
# "skeletopyze": only needed for importing skeletopyze skeletons. See https://github.com/funkey/skeletopyze
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Visualization',
],
)
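# Typical invocations of this setup script (a sketch; both commands assume
# setuptools plus a C++ toolchain for the Cython extension are available):
#
#     python setup.py build_ext --inplace   # compile mergelist_tools.pyx in place
#     python setup.py install               # regular install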
| gpl-2.0 |
unsiloai/syntaxnet-ops-hack | tensorflow/contrib/keras/python/keras/regularizers.py | 58 | 2778 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in regularizers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import serialize_keras_object
class Regularizer(object):
"""Regularizer base class.
"""
def __call__(self, x):
return 0.
@classmethod
def from_config(cls, config):
return cls(**config)
class L1L2(Regularizer):
"""Regularizer for L1 and L2 regularization.
Arguments:
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0., l2=0.): # pylint: disable=redefined-outer-name
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
def __call__(self, x):
regularization = 0.
if self.l1:
regularization += K.sum(self.l1 * K.abs(x))
if self.l2:
regularization += K.sum(self.l2 * K.square(x))
return regularization
def get_config(self):
return {'l1': float(self.l1), 'l2': float(self.l2)}
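# Usage sketch (the Dense layer and its kernel_regularizer argument are the
# standard Keras API; the factor 0.01 is just an illustration):
#
#     from tensorflow.contrib.keras.python.keras.layers import Dense
#     layer = Dense(64, kernel_regularizer=l1_l2(l1=0.01, l2=0.01))
#     # adds 0.01 * sum(|w|) + 0.01 * sum(w ** 2) to the training loss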
# Aliases.
def l1(l=0.01):
return L1L2(l1=l)
def l2(l=0.01):
return L1L2(l2=l)
def l1_l2(l1=0.01, l2=0.01): # pylint: disable=redefined-outer-name
return L1L2(l1=l1, l2=l2)
def serialize(regularizer):
return serialize_keras_object(regularizer)
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='regularizer')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret regularizer identifier:', identifier)
| apache-2.0 |
raw1z/ultisnips | pythonx/UltiSnips/snippet/parsing/_base.py | 21 | 2505 | #!/usr/bin/env python
# encoding: utf-8
"""Common functionality of the snippet parsing codes."""
from UltiSnips.position import Position
from UltiSnips.snippet.parsing._lexer import tokenize, TabStopToken
from UltiSnips.text_objects import TabStop
from UltiSnips.text_objects import Mirror
from UltiSnips.snippet.parsing._lexer import MirrorToken
def resolve_ambiguity(all_tokens, seen_ts):
"""$1 could be a Mirror or a TabStop.
This figures this out.
"""
for parent, token in all_tokens:
if isinstance(token, MirrorToken):
if token.number not in seen_ts:
seen_ts[token.number] = TabStop(parent, token)
else:
Mirror(parent, seen_ts[token.number], token)
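# For instance (illustrative): in the snippet text "for ($1; $1 < n;)" both
# "$1" occurrences arrive as MirrorTokens; the first becomes the TabStop for
# number 1 and the second becomes a Mirror tracking it.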
def tokenize_snippet_text(snippet_instance, text, indent,
allowed_tokens_in_text, allowed_tokens_in_tabstops,
token_to_textobject):
"""Turns 'text' into a stream of tokens and creates the text objects from
those tokens that are mentioned in 'token_to_textobject' assuming the
current 'indent'.
The 'allowed_tokens_in_text' define which tokens will be recognized
in 'text' while 'allowed_tokens_in_tabstops' are the tokens that
will be recognized in TabStop placeholder text.
"""
seen_ts = {}
all_tokens = []
def _do_parse(parent, text, allowed_tokens):
"""Recursive function that actually creates the objects."""
tokens = list(tokenize(text, indent, parent.start, allowed_tokens))
for token in tokens:
all_tokens.append((parent, token))
if isinstance(token, TabStopToken):
ts = TabStop(parent, token)
seen_ts[token.number] = ts
_do_parse(ts, token.initial_text,
allowed_tokens_in_tabstops)
else:
klass = token_to_textobject.get(token.__class__, None)
if klass is not None:
klass(parent, token)
_do_parse(snippet_instance, text, allowed_tokens_in_text)
return all_tokens, seen_ts
def finalize(all_tokens, seen_ts, snippet_instance):
"""Adds a tabstop 0 if non is in 'seen_ts' and brings the text of the
snippet instance into Vim."""
if 0 not in seen_ts:
mark = all_tokens[-1][1].end # Last token is always EndOfText
m1 = Position(mark.line, mark.col)
TabStop(snippet_instance, 0, mark, m1)
snippet_instance.replace_initial_text()
| gpl-3.0 |
pizzathief/scipy | scipy/integrate/tests/test_quadrature.py | 2 | 8426 | import numpy as np
from numpy import cos, sin, pi
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, suppress_warnings)
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumtrapz, quad, simps, fixed_quad,
AccuracyWarning)
class TestFixedQuad(object):
def test_scalar(self):
n = 4
func = lambda x: x**(2*n - 1)
expected = 1/(2*n)
got, _ = fixed_quad(func, 0, 1, n=n)
# quadrature exact for this input
assert_allclose(got, expected, rtol=1e-12)
def test_vector(self):
n = 4
p = np.arange(1, 2*n)
func = lambda x: x**p[:,None]
expected = 1/(p + 1)
got, _ = fixed_quad(func, 0, 1, n=n)
assert_allclose(got, expected, rtol=1e-12)
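# Background for the exactness checks above: n-point Gauss-Legendre quadrature
# integrates polynomials of degree <= 2n - 1 exactly, so x**(2n - 1) over
# [0, 1] must come out as 1/(2n) up to rounding error.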
class TestQuadrature(object):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_quadrature_rtol(self):
def myfunc(x, n, z): # Bessel function integrand
return 1e90 * cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_quadrature_miniter(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
table_val = 0.30614353532540296487
for miniter in [5, 52]:
val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
assert_almost_equal(val, table_val, decimal=7)
assert_(err < 1.0)
def test_quadrature_single_args(self):
def myfunc(x, n):
return 1e90 * cos(n*x-1.8*sin(x))/pi
val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
table_val = 1e90 * 0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg_rtol(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return 1e19*cos(n*x-z*sin(x))/pi
val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
table_val = 1e19*0.30614353532540296487
assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
def test_romb_gh_3731(self):
# Check that romb makes maximal use of data points
x = np.arange(2**4+1)
y = np.cos(0.2*x)
val = romb(y)
val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
assert_allclose(val, val2, rtol=1e-8, atol=0)
# should be equal to romb with 2**k+1 samples
with suppress_warnings() as sup:
sup.filter(AccuracyWarning, "divmax .4. exceeded")
val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
assert_allclose(val, val3, rtol=1e-12, atol=0)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
def test_simps(self):
y = np.arange(17)
assert_equal(simps(y), 128)
assert_equal(simps(y, dx=0.5), 64)
assert_equal(simps(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(simps(y, x=x, even='avg'), 13.875)
assert_equal(simps(y, x=x, even='first'), 13.75)
assert_equal(simps(y, x=x, even='last'), 14)
class TestCumtrapz(object):
def test_1d(self):
x = np.linspace(-2, 2, num=5)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = [0., -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, x, initial=None)
assert_allclose(y_int, y_expected[1:])
def test_y_nd_x_nd(self):
x = np.arange(3 * 2 * 4).reshape(3, 2, 4)
y = x
y_int = cumtrapz(y, x, initial=0)
y_expected = np.array([[[0., 0.5, 2., 4.5],
[0., 4.5, 10., 16.5]],
[[0., 8.5, 18., 28.5],
[0., 12.5, 26., 40.5]],
[[0., 16.5, 34., 52.5],
[0., 20.5, 42., 64.5]]])
assert_allclose(y_int, y_expected)
# Try with all axes
shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
for axis, shape in zip([0, 1, 2], shapes):
y_int = cumtrapz(y, x, initial=3.45, axis=axis)
assert_equal(y_int.shape, (3, 2, 4))
y_int = cumtrapz(y, x, initial=None, axis=axis)
assert_equal(y_int.shape, shape)
def test_y_nd_x_1d(self):
y = np.arange(3 * 2 * 4).reshape(3, 2, 4)
x = np.arange(4)**2
# Try with all axes
ys_expected = (
np.array([[[4., 5., 6., 7.],
[8., 9., 10., 11.]],
[[40., 44., 48., 52.],
[56., 60., 64., 68.]]]),
np.array([[[2., 3., 4., 5.]],
[[10., 11., 12., 13.]],
[[18., 19., 20., 21.]]]),
np.array([[[0.5, 5., 17.5],
[4.5, 21., 53.5]],
[[8.5, 37., 89.5],
[12.5, 53., 125.5]],
[[16.5, 69., 161.5],
[20.5, 85., 197.5]]]))
for axis, y_expected in zip([0, 1, 2], ys_expected):
y_int = cumtrapz(y, x=x[:y.shape[axis]], axis=axis, initial=None)
assert_allclose(y_int, y_expected)
def test_x_none(self):
y = np.linspace(-2, 2, num=5)
y_int = cumtrapz(y)
y_expected = [-1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, initial=1.23)
y_expected = [1.23, -1.5, -2., -1.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3)
y_expected = [-4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
y_int = cumtrapz(y, dx=3, initial=1.23)
y_expected = [1.23, -4.5, -6., -4.5, 0.]
assert_allclose(y_int, y_expected)
| bsd-3-clause |
epssy/hue | apps/jobbrowser/src/jobbrowser/models.py | 5 | 22542 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import lxml.html
import re
import urllib2
from urlparse import urlparse, urlunparse
from django.core.urlresolvers import reverse
from desktop.lib.view_util import format_duration_in_millis
from desktop.lib import i18n
from django.utils.html import escape
from filebrowser.views import location_to_url
from hadoop import job_tracker
from hadoop import confparse
from hadoop.api.jobtracker.ttypes import JobNotFoundException
import hadoop.api.jobtracker.ttypes as ttypes
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from jobbrowser.conf import DISABLE_KILLING_JOBS
LOGGER = logging.getLogger(__name__)
def can_view_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-view-job', '')
return acl == '*' or username in acl.split(',')
def can_modify_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-modify-job', '')
return acl == '*' or username in acl.split(',')
def get_acls(job):
if job.is_mr2:
return job.acls
else:
return job.full_job_conf
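# Illustrative sketch of the ACL helpers above (the job object and ACL values
# are hypothetical):
#
#   # with mapreduce.job.acl-view-job set to "alice,bob"
#   can_view_job("alice", job)   # -> True
#   can_view_job("carol", job)   # -> False
#   # with mapreduce.job.acl-view-job set to "*"
#   can_view_job("carol", job)   # -> True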
def can_kill_job(self, user):
if DISABLE_KILLING_JOBS.get():
return False
if self.status.lower() not in ('running', 'pending', 'accepted'):
return False
if user.is_superuser:
return True
if can_modify_job(user.username, self):
return True
return user.username == self.user
class JobLinkage(object):
"""
A thin representation of a job, without much of the details.
Its purpose is to wrap a JobID to allow us to get further
information from Hadoop, without instantiating a full Job object
(which requires talking to Hadoop).
"""
def __init__(self, jobtracker, jobid):
"""
JobLinkage(jobtracker, jobid) -> JobLinkage
The jobid is the jobid string (not the thrift jobid)
"""
self._jobtracker = jobtracker
self.jobId = jobid
self.jobId_short = "_".join(jobid.split("_")[-2:])
self.is_mr2 = False
def get_task(self, task_id):
"""Retrieve a TaskInProgress from hadoop."""
ttask = self._jobtracker.get_task(
self._jobtracker.thriftjobid_from_string(self.jobId),
self._jobtracker.thrifttaskid_from_string(task_id))
return Task(ttask, self._jobtracker)
class Job(JobLinkage):
"""
Creates a Job instance pulled from the job tracker Thrift interface.
"""
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
@staticmethod
def from_id(jt, jobid, is_finished=False):
"""
Returns a Job instance given a job tracker interface and an id. The job tracker interface is typically
located in request.jt.
"""
try:
thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException:
try:
thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException, e:
raise PopupException(_("Could not find job with id %(jobid)s.") % {'jobid': jobid}, detail=e)
return Job(jt, thriftjob)
@staticmethod
def from_thriftjob(jt, thriftjob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.
The job tracker interface is typically located in request.jt
"""
return Job(jt, thriftjob)
def __init__(self, jt, thriftJob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that
job tracker interface. The job tracker interface is typically located in request.jt
"""
JobLinkage.__init__(self, jt, thriftJob.jobID.asString)
self.jt = jt
self.job = thriftJob
self.tasks = []
if self.job.tasks is not None:
self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)
self.task_map = dict( (task.taskId, task) for task in self.tasks )
self._counters = None
self._conf_keys = None
self._full_job_conf = None
self._init_attributes()
self.is_retired = hasattr(thriftJob, 'is_retired')
self.is_mr2 = False
self.applicationType = 'MR2'
@property
def counters(self):
if self.is_retired:
self._counters = {}
elif self._counters is None:
rollups = self.jt.get_job_counter_rollups(self.job.jobID)
# We get back a structure with counter lists for maps, reduces, and total
# and we need to invert this
def aggregate_counters(ctrs_from_jt, key, target):
for group in ctrs_from_jt.groups:
if group.name not in target:
target[group.name] = {
'name': group.name,
'displayName': group.displayName,
'counters': {}
}
agg_counters = target[group.name]['counters']
for counter in group.counters.itervalues():
if counter.name not in agg_counters:
agg_counters[counter.name] = {
'name': counter.name,
'displayName': counter.displayName,
}
agg_counters[counter.name][key] = counter.value
self._counters = {}
aggregate_counters(rollups.mapCounters, "map", self._counters)
aggregate_counters(rollups.reduceCounters, "reduce", self._counters)
aggregate_counters(rollups.jobCounters, "total", self._counters)
return self._counters
@property
def conf_keys(self):
if self._conf_keys is None:
self._initialize_conf_keys()
return self._conf_keys
@property
def full_job_conf(self):
if self._full_job_conf is None:
self._initialize_conf_keys()
return self._full_job_conf
def _init_attributes(self):
self.queueName = i18n.smart_unicode(self.job.profile.queueName)
self.jobName = i18n.smart_unicode(self.job.profile.name)
self.user = i18n.smart_unicode(self.job.profile.user)
self.mapProgress = self.job.status.mapProgress
self.reduceProgress = self.job.status.reduceProgress
self.setupProgress = self.job.status.setupProgress
self.cleanupProgress = self.job.status.cleanupProgress
if self.job.desiredMaps == 0:
maps_percent_complete = 0
else:
maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))
self.desiredMaps = self.job.desiredMaps
if self.job.desiredReduces == 0:
reduces_percent_complete = 0
else:
reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))
self.desiredReduces = self.job.desiredReduces
self.maps_percent_complete = maps_percent_complete
self.finishedMaps = self.job.finishedMaps
self.finishedReduces = self.job.finishedReduces
self.reduces_percent_complete = reduces_percent_complete
self.startTimeMs = self.job.startTime
self.startTimeFormatted = format_unixtime_ms(self.job.startTime)
self.launchTimeMs = self.job.launchTime
self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)
self.finishTimeMs = self.job.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)
self.status = self.job.status.runStateAsString
self.priority = self.job.priorityAsString
self.jobFile = self.job.profile.jobFile
finishTime = self.job.finishTime
if finishTime == 0:
finishTime = datetime.datetime.now()
else:
finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)
self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)
diff = int(finishTime.strftime("%s")) * 1000 - self.startTimeMs
self.durationFormatted = format_duration_in_millis(diff)
self.durationInMillis = diff
def kill(self):
self.jt.kill_job(self.job.jobID)
def get_task(self, id):
try:
return self.task_map[id]
except KeyError:
return JobLinkage.get_task(self, id)
def filter_tasks(self, task_types=None, task_states=None, task_text=None):
"""
Filters the tasks of the job.
Pass in task_type and task_state as sets; None for "all".
task_text is used to search in the state, mostRecentState, and the ID.
"""
assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
def is_good_match(t):
if task_types is not None:
if t.task.taskID.taskTypeAsString.lower() not in task_types:
return False
if task_states is not None:
if t.state.lower() not in task_states:
return False
if task_text is not None:
tt_lower = task_text.lower()
if tt_lower not in t.state.lower() and tt_lower not in t.mostRecentState.lower() and tt_lower not in t.task.taskID.asString.lower():
return False
return True
return [ t for t in self.tasks if is_good_match(t) ]
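  # Sketch of calling the filter above; the sets must be subsets of
  # job_tracker.VALID_TASK_TYPES / VALID_TASK_STATES (the values here are
  # illustrative):
  #
  #   job.filter_tasks(task_types=set(['map']),
  #                    task_states=set(['running']),
  #                    task_text='attempt')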
def _initialize_conf_keys(self):
if self.is_retired:
self._conf_keys = {}
self._full_job_conf = {}
else:
conf_keys = [
'mapred.mapper.class',
'mapred.reducer.class',
'mapred.input.format.class',
'mapred.output.format.class',
'mapred.input.dir',
'mapred.output.dir',
]
jobconf = get_jobconf(self.jt, self.jobId)
self._full_job_conf = jobconf
self._conf_keys = {}
for k, v in jobconf.iteritems():
if k in conf_keys:
self._conf_keys[dots_to_camel_case(k)] = v
class TaskList(object):
@staticmethod
def select(jt, jobid, task_types, task_states, text, count, offset):
"""
select(jt, jobid, task_types, task_states, text, count, offset) -> TaskList
Retrieve a TaskList from Hadoop according to the given criteria.
task_types is a set of job_tracker.VALID_TASK_TYPES. A value to None means everything.
task_states is a set of job_tracker.VALID_TASK_STATES. A value to None means everything.
"""
assert task_types is None or job_tracker.VALID_TASK_TYPES.issuperset(task_types)
assert task_states is None or job_tracker.VALID_TASK_STATES.issuperset(task_states)
if task_types is None:
task_types = job_tracker.VALID_TASK_TYPES
if task_states is None:
task_states = job_tracker.VALID_TASK_STATES
tjobid = jt.thriftjobid_from_string(jobid)
thrift_list = jt.get_task_list(tjobid, task_types, task_states, text, count, offset)
return TaskList.from_thriftTaskList(thrift_list, jt)
@staticmethod
def from_thriftTaskList(thrift_task_list, jobtracker):
"""TaskList.from_thriftTaskList(thrift_task_list, jobtracker) -> TaskList
"""
if thrift_task_list is None:
return None
return TaskList(thrift_task_list, jobtracker)
def __init__(self, tasklist, jobtracker):
self.__tasklist = tasklist # The thrift task list
self.__jt = jobtracker
self.__init_attributes()
def __init_attributes(self):
self.__tasksSoFar = [ Task(t, self.__jt) for t in self.__tasklist.tasks ]
self.__nTotalTasks = self.__tasklist.numTotalTasks
def __iter__(self):
return self.__tasksSoFar.__iter__()
def __len__(self):
return len(self.__tasksSoFar)
def __getitem__(self, key):
return self.__tasksSoFar[key]
@property
def tasks(self):
return self.__tasksSoFar
@property
def numTotalTasks(self):
return self.__nTotalTasks
class Task(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
def __init__(self, task, jt):
self.task = task
self.jt = jt
self._init_attributes()
self.attempt_map = {}
for id, attempt in self.task.taskStatuses.iteritems():
ta = TaskAttempt(attempt, task=self)
self.attempt_map[id] = ta
@property
def attempts(self):
return self.attempt_map.values()
def _init_attributes(self):
self.taskType = self.task.taskID.taskTypeAsString
self.taskId = self.task.taskID.asString
self.taskId_short = "_".join(self.taskId.split("_")[-2:])
self.startTimeMs = self.task.startTime
self.startTimeFormatted = format_unixtime_ms(self.task.startTime)
self.execStartTimeMs = self.task.execStartTime
self.execStartTimeFormatted = format_unixtime_ms(self.task.execStartTime)
self.execFinishTimeMs = self.task.execFinishTime
self.execFinishTimeFormatted = format_unixtime_ms(self.task.execFinishTime)
self.state = self.task.state
assert self.state in job_tracker.VALID_TASK_STATES
self.progress = self.task.progress
self.taskId = self.task.taskID.asString
self.jobId = self.task.taskID.jobID.asString
self.taskAttemptIds = self.task.taskStatuses.keys()
self.mostRecentState = self.task.mostRecentState
self.diagnosticMap = self.task.taskDiagnosticData
self.counters = self.task.counters
self.failed = self.task.failed
self.complete = self.task.complete
self.is_mr2 = False
def get_attempt(self, id):
"""
Returns a TaskAttempt for a given id.
"""
return self.attempt_map[id]
class TaskAttempt(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve task["foo"] as task.foo.
"""
return getattr(self, item)
def __init__(self, task_attempt, task):
assert task_attempt is not None
self.task_attempt = task_attempt
self.task = task
    self._init_attributes()
def _init_attributes(self):
self.taskType = self.task_attempt.taskID.taskID.taskTypeAsString
self.attemptId = self.task_attempt.taskID.asString
self.attemptId_short = "_".join(self.attemptId.split("_")[-2:])
self.startTimeMs = self.task_attempt.startTime
self.startTimeFormatted = format_unixtime_ms(self.task_attempt.startTime)
self.finishTimeMs = self.task_attempt.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.task_attempt.finishTime)
self.state = self.task_attempt.stateAsString.lower()
self.taskTrackerId = self.task_attempt.taskTracker
self.phase = self.task_attempt.phaseAsString
self.progress = self.task_attempt.progress
self.outputSize = self.task_attempt.outputSize
self.shuffleFinishTimeMs = self.task_attempt.shuffleFinishTime
self.shuffleFinishTimeFormatted = format_unixtime_ms(self.task_attempt.shuffleFinishTime)
self.sortFinishTimeMs = self.task_attempt.sortFinishTime
self.sortFinishTimeFormatted = format_unixtime_ms(self.task_attempt.sortFinishTime)
self.mapFinishTimeMs = self.task_attempt.mapFinishTime # DO NOT USE, NOT VALID IN 0.20
self.mapFinishTimeFormatted = format_unixtime_ms(self.task_attempt.mapFinishTime)
self.counters = self.task_attempt.counters
self.is_mr2 = False
def get_tracker(self):
try:
tracker = Tracker.from_name(self.task.jt, self.taskTrackerId)
return tracker
except ttypes.TaskTrackerNotFoundException, e:
LOGGER.warn("Tracker %s not found: %s" % (self.taskTrackerId, e))
if LOGGER.isEnabledFor(logging.DEBUG):
all_trackers = self.task.jt.all_task_trackers()
for t in all_trackers.trackers:
LOGGER.debug("Available tracker: %s" % (t.trackerName,))
raise ttypes.TaskTrackerNotFoundException(
_("Cannot look up TaskTracker %(id)s.") % {'id': self.taskTrackerId})
def get_task_log(self):
"""
get_task_log() -> (stdout_text, stderr_text, syslog_text)
Retrieve the task log from the TaskTracker, at this url:
http://<tracker_host>:<port>/tasklog?attemptid=<attempt_id>
Optional query string:
&filter=<source> : where <source> is 'syslog', 'stdout', or 'stderr'.
&start=<offset> : specify the start offset of the log section, when using a filter.
&end=<offset> : specify the end offset of the log section, when using a filter.
"""
tracker = self.get_tracker()
url = urlunparse(('http',
'%s:%s' % (tracker.host, tracker.httpPort),
'tasklog',
None,
'attemptid=%s' % (self.attemptId,),
None))
LOGGER.info('Retrieving %s' % (url,))
try:
data = urllib2.urlopen(url)
except urllib2.URLError:
raise urllib2.URLError(_("Cannot retrieve logs from TaskTracker %(id)s.") % {'id': self.taskTrackerId})
et = lxml.html.parse(data)
log_sections = et.findall('body/pre')
logs = [section.text or '' for section in log_sections]
if len(logs) < 3:
LOGGER.warn('Error parsing task attempt log for %s at "%s". Found %d (not 3) log sections' %
(self.attemptId, url, len(log_sections)))
err = _("Hue encountered an error while retrieving logs from '%s'.") % (url,)
logs += [err] * (3 - len(logs))
return logs
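# Sketch (illustrative, not part of the original API): fetching a single
# filtered log section via the optional query string documented in
# get_task_log(). The helper name and defaults are assumptions; it reuses the
# same tasklog endpoint, urlunparse() and urllib2.urlopen() as above.
#
# def get_task_log_section(self, source, start=0, end=-1):
#     tracker = self.get_tracker()
#     query = 'attemptid=%s&filter=%s&start=%s&end=%s' % (
#         self.attemptId, source, start, end)
#     url = urlunparse(('http', '%s:%s' % (tracker.host, tracker.httpPort),
#                       'tasklog', None, query, None))
#     return urllib2.urlopen(url).read()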
class Tracker(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo.
"""
return getattr(self, item)
@staticmethod
def from_name(jt, trackername):
return Tracker(jt.task_tracker(trackername))
def __init__(self, thrifttracker):
self.tracker = thrifttracker
self._init_attributes()
def _init_attributes(self):
self.trackerId = self.tracker.trackerName
self.httpPort = self.tracker.httpPort
self.host = self.tracker.host
self.lastSeenMs = self.tracker.lastSeen
self.lastSeenFormatted = format_unixtime_ms(self.tracker.lastSeen)
self.totalVirtualMemory = self.tracker.totalVirtualMemory
self.totalPhysicalMemory = self.tracker.totalPhysicalMemory
self.availableSpace = self.tracker.availableSpace
self.failureCount = self.tracker.failureCount
self.mapCount = self.tracker.mapCount
self.reduceCount = self.tracker.reduceCount
self.maxMapTasks = self.tracker.maxMapTasks
self.maxReduceTasks = self.tracker.maxReduceTasks
self.taskReports = self.tracker.taskReports
self.is_mr2 = False
class Cluster(object):
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
def __init__(self, jt):
self.status = jt.cluster_status()
self._init_attributes()
def _init_attributes(self):
self.mapTasksInProgress = self.status.mapTasks
self.reduceTasksInProgress = self.status.reduceTasks
self.maxMapTasks = self.status.maxMapTasks
self.maxReduceTasks = self.status.maxReduceTasks
self.usedHeapMemory = self.status.usedMemory
self.maxHeapMemory = self.status.maxMemory
self.clusterStartTimeMs = self.status.startTime
self.clusterStartTimeFormatted = format_unixtime_ms(self.status.startTime)
self.identifier = self.status.identifier
self.taskTrackerExpiryInterval = self.status.taskTrackerExpiryInterval
self.totalJobSubmissions = self.status.totalSubmissions
self.state = self.status.stateAsString
self.numActiveTrackers = self.status.numActiveTrackers
self.activeTrackerNames = self.status.activeTrackerNames
self.numBlackListedTrackers = self.status.numBlacklistedTrackers
self.blacklistedTrackerNames = self.status.blacklistedTrackerNames
self.hostname = self.status.hostname
self.httpPort = self.status.httpPort
class LinkJobLogs(object):
@classmethod
def _make_hdfs_links(cls, log):
escaped_logs = escape(log)
return re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
@classmethod
def _make_mr_links(cls, log):
escaped_logs = escape(log)
return re.sub('(job_[0-9_]+(/|\.)?)', LinkJobLogs._replace_mr_link, escaped_logs)
@classmethod
def _make_links(cls, log):
escaped_logs = escape(log)
hdfs_links = re.sub('((?<= |;)/|hdfs://)[^ <&\t;,\n]+', LinkJobLogs._replace_hdfs_link, escaped_logs)
return re.sub('(job_[0-9_]+(/|\.)?)', LinkJobLogs._replace_mr_link, hdfs_links)
@classmethod
def _replace_hdfs_link(cls, match):
try:
return '<a href="%s" target="_blank">%s</a>' % (location_to_url(match.group(0), strict=False), match.group(0))
except Exception:
LOGGER.exception('failed to replace hdfs links: %s' % (match.groups(),))
return match.group(0)
@classmethod
def _replace_mr_link(cls, match):
try:
return '<a href="%s" target="_blank">%s</a>' % (reverse('jobbrowser.views.single_job', kwargs={'job': match.group(0)}), match.group(0))
except Exception:
LOGGER.exception('failed to replace mr links: %s' % (match.groups(),))
return match.group(0)
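# Example behaviour (illustrative input, assuming the regexes above):
# LinkJobLogs._make_links('wrote hdfs://nn/user/x, see job_201301010000_0001')
# first HTML-escapes the text, then wraps the HDFS path (via location_to_url)
# and the job id (via the jobbrowser single_job view) in
# <a href="..." target="_blank"> anchors.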
def get_jobconf(jt, jobid):
"""
Returns a ConfParse object wrapping the jobconf for the job
corresponding to jobid.
"""
jid = jt.thriftjobid_from_string(jobid)
# This will throw if the jobconf can't be found
xml_data = jt.get_job_xml(jid)
return confparse.ConfParse(xml_data)
def format_unixtime_ms(unixtime):
"""
Format a unix timestamp in ms to a human readable string
"""
if unixtime:
return str(datetime.datetime.fromtimestamp(unixtime/1000).strftime("%x %X %Z"))
else:
return ""
DOTS = re.compile("\.([a-z])")
def dots_to_camel_case(dots):
"""
Takes a string delimited with periods and returns a camel-case string.
Example: dots_to_camel_case("foo.bar.baz") //returns fooBarBaz
"""
def return_upper(match):
return match.groups()[0].upper()
return str(DOTS.sub(return_upper, dots))
def get_path(hdfs_url):
"""
Returns the path component of an HDFS url.
"""
# urlparse is lame, and only "uses_netloc" for a certain
# set of protocols. So we replace hdfs with gopher:
if hdfs_url.startswith("hdfs://"):
gopher_url = "gopher://" + hdfs_url[7:]
path = urlparse(gopher_url)[2] # path
return path
else:
return hdfs_url
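# Example (illustrative): get_path("hdfs://namenode:8020/user/alice") returns
# "/user/alice" thanks to the gopher substitution above; strings that do not
# start with "hdfs://" are returned unchanged.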
| apache-2.0 |
Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/database_backups/workflows/create_backup.py | 11 | 3194 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class BackupDetailsAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
instance = forms.ChoiceField(label=_("Database Instance"))
description = forms.CharField(max_length=512, label=_("Description"),
widget=forms.TextInput(),
required=False,
help_text=_("Optional Backup Description"))
class Meta:
name = _("Details")
help_text_template = \
"project/database_backups/_backup_details_help.html"
def populate_instance_choices(self, request, context):
LOG.info("Obtaining list of instances.")
try:
instances = api.trove.instance_list(request)
except Exception:
instances = []
msg = _("Unable to list database instance to backup.")
exceptions.handle(request, msg)
return [(i.id, i.name) for i in instances]
class SetBackupDetails(workflows.Step):
action_class = BackupDetailsAction
contributes = ["name", "description", "instance"]
class CreateBackup(workflows.Workflow):
slug = "create_backup"
name = _("Backup Database")
finalize_button_name = _("Backup")
success_message = _('Scheduled backup "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:database_backups:index"
default_steps = [SetBackupDetails]
def get_initial(self):
initial = super(CreateBackup, self).get_initial()
return initial
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
return message % {"count": _("instance"), "name": name}
def handle(self, request, context):
try:
LOG.info("Creating backup")
api.trove.backup_create(request,
context['name'],
context['instance'],
context['description'])
return True
except Exception:
LOG.exception("Exception while creating backup")
msg = _('Error creating database backup.')
exceptions.handle(request, msg)
return False
| apache-2.0 |
jcshen007/cloudstack | plugins/hypervisors/baremetal/resources/security_group_agent/security_group_agent/sglib.py | 7 | 7010 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Automatically generated by addcopyright.py at 01/29/2013
import sys, os, time, atexit
import traceback
import subprocess
from signal import SIGTERM
import cherrypy
import copy
class Request(object):
def __init__(self):
self.headers = None
self.body = None
self.method = None
self.query_string = None
@staticmethod
def from_cherrypy_request(creq):
req = Request()
req.headers = copy.copy(creq.headers)
req.body = creq.body.fp.read() if creq.body else None
req.method = copy.copy(creq.method)
req.query_string = copy.copy(creq.query_string) if creq.query_string else None
return req
class ShellError(Exception):
'''shell error'''
class ShellCmd(object):
'''
classdocs
'''
def __init__(self, cmd, workdir=None, pipe=True):
'''
Constructor
'''
self.cmd = cmd
if pipe:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/sh', cwd=workdir)
else:
self.process = subprocess.Popen(cmd, shell=True, executable='/bin/sh', cwd=workdir)
self.stdout = None
self.stderr = None
self.return_code = None
def __call__(self, is_exception=True):
(self.stdout, self.stderr) = self.process.communicate()
if is_exception and self.process.returncode != 0:
err = []
err.append('failed to execute shell command: %s' % self.cmd)
err.append('return code: %s' % self.process.returncode)
err.append('stdout: %s' % self.stdout)
err.append('stderr: %s' % self.stderr)
raise ShellError('\n'.join(err))
self.return_code = self.process.returncode
return self.stdout
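# Usage sketch (illustrative; the command is an example):
# cmd = ShellCmd('ls /tmp')
# out = cmd(is_exception=False)  # or cmd() to raise ShellError on failure
# if cmd.return_code != 0:
#     print cmd.stderr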
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
atexit_hooks = []
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
@staticmethod
def register_atexit_hook(hook):
Daemon.atexit_hooks.append(hook)
@staticmethod
def _atexit():
for hook in Daemon.atexit_hooks:
try:
hook()
except Exception:
content = traceback.format_exc()
err = 'Exception when calling atexit hook[%s]\n%s' % (hook.__name__, content)
#logger.error(err)
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
Daemon.register_atexit_hook(self.delpid)
atexit.register(Daemon._atexit)
pid = str(os.getpid())
file(self.pidfile,'w').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
pscmd = ShellCmd('ps -p %s > /dev/null' % pid)
pscmd(is_exception=False)
if pscmd.return_code == 0:
message = "Daemon already running, pid is %s\n"
sys.stderr.write(message % pid)
sys.exit(0)
# Start the daemon
self.daemonize()
try:
self.run()
except Exception:
content = traceback.format_exc()
#logger.error(content)
sys.exit(1)
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| apache-2.0 |
sgarrity/bedrock | bedrock/firefox/urls.py | 1 | 7825 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import url
import bedrock.releasenotes.views
from bedrock.mozorg.util import page
from bedrock.releasenotes import version_re
from bedrock.firefox import views
latest_re = r'^firefox(?:/(?P<version>%s))?/%s/$'
firstrun_re = latest_re % (version_re, 'firstrun')
whatsnew_re = latest_re % (version_re, 'whatsnew')
whatsnew_re_india = latest_re % (version_re, 'whatsnew/india')
whatsnew_re_all = latest_re % (version_re, 'whatsnew/all')
platform_re = '(?P<platform>android|ios)'
channel_re = '(?P<channel>beta|aurora|developer|nightly|organizations)'
releasenotes_re = latest_re % (version_re, r'(aurora|release)notes')
android_releasenotes_re = releasenotes_re.replace(r'firefox', 'firefox/android')
ios_releasenotes_re = releasenotes_re.replace(r'firefox', 'firefox/ios')
sysreq_re = latest_re % (version_re, 'system-requirements')
android_sysreq_re = sysreq_re.replace(r'firefox', 'firefox/android')
ios_sysreq_re = sysreq_re.replace(r'firefox', 'firefox/ios')
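# For reference (illustrative): firstrun_re makes the version segment
# optional, so both /firefox/firstrun/ and e.g. /firefox/71.0/firstrun/
# match, the latter capturing version='71.0'; the whatsnew regexes behave the
# same way. The sample version string is an assumption about version_re.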
urlpatterns = (
url(r'^firefox/$', views.firefox_home, name='firefox'),
url(r'^firefox/all/$', views.firefox_all, name='firefox.all'),
url(r'^firefox/accounts/$', views.firefox_accounts, name='firefox.accounts'),
url(r'^firefox/campaign/$', views.campaign, name='firefox.campaign'),
page('firefox/flashback', 'firefox/flashback/index.html', active_locales=['en-US', 'de', 'fr']),
page('firefox/channel/desktop', 'firefox/channel/desktop.html'),
page('firefox/channel/android', 'firefox/channel/android.html'),
page('firefox/channel/ios', 'firefox/channel/ios.html'),
url(r'^firefox/concerts/$', views.firefox_concerts, name='firefox.concerts'),
page('firefox/developer', 'firefox/developer/index.html'),
url('^firefox/election/$', views.election_with_cards, name='firefox.election'),
page('firefox/enterprise', 'firefox/enterprise/index.html'),
page('firefox/enterprise/signup', 'firefox/enterprise/signup.html'),
page('firefox/enterprise/signup/thanks', 'firefox/enterprise/signup-thanks.html'),
page('firefox/facebookcontainer', 'firefox/facebookcontainer/index.html'),
page('firefox/features', 'firefox/features/index.html'),
url('^firefox/features/bookmarks/$',
views.FeaturesBookmarksView.as_view(),
name='firefox.features.bookmarks'),
url('^firefox/features/fast/$',
views.FeaturesFastView.as_view(),
name='firefox.features.fast'),
url('^firefox/features/independent/$',
views.FeaturesIndependentView.as_view(),
name='firefox.features.independent'),
url('^firefox/features/memory/$',
views.FeaturesMemoryView.as_view(),
name='firefox.features.memory'),
url('^firefox/features/password-manager/$',
views.FeaturesPasswordManagerView.as_view(),
name='firefox.features.password-manager'),
url('^firefox/features/private-browsing/$',
views.FeaturesPrivateBrowsingView.as_view(),
name='firefox.features.private-browsing'),
url(r'^firefox/ios/testflight/$', views.ios_testflight, name='firefox.ios.testflight'),
page('firefox/mobile', 'firefox/mobile/index.html'),
page('firefox/mobile/get-app', 'firefox/mobile/get-app.html'),
url('^firefox/send-to-device-post/$', views.send_to_device_ajax,
name='firefox.send-to-device-post'),
page('firefox/unsupported-systems', 'firefox/unsupported-systems.html'),
url(r'^firefox/new/$', views.new, name='firefox.new'),
url(r'^firefox/download/thanks/$', views.download_thanks, name='firefox.download.thanks'),
page('firefox/nightly/firstrun', 'firefox/nightly_firstrun.html'),
url(r'^firefox/installer-help/$', views.installer_help,
name='firefox.installer-help'),
url(firstrun_re, views.FirstrunView.as_view(), name='firefox.firstrun'),
url(whatsnew_re, views.WhatsNewRedirectorView.as_view(), name='firefox.whatsnew'),
url(whatsnew_re_india, views.WhatsNewIndiaView.as_view(), name='firefox.whatsnew.india'),
url(whatsnew_re_all, views.WhatsnewView.as_view(), name='firefox.whatsnew.all'),
page('firefox/features/adblocker', 'firefox/features/adblocker.html'),
page('firefox/concerts', 'firefox/concerts.html'),
# Release notes
url('^firefox/(?:%s/)?(?:%s/)?notes/$' % (platform_re, channel_re),
bedrock.releasenotes.views.latest_notes, name='firefox.notes'),
url('^firefox/nightly/notes/feed/$',
bedrock.releasenotes.views.nightly_feed, name='firefox.nightly.notes.feed'),
url('^firefox/(?:latest/)?releasenotes/$', bedrock.releasenotes.views.latest_notes,
{'product': 'firefox'}),
url('^firefox/(?:%s/)?(?:%s/)?system-requirements/$' % (platform_re, channel_re),
bedrock.releasenotes.views.latest_sysreq,
{'product': 'firefox'}, name='firefox.sysreq'),
url(releasenotes_re, bedrock.releasenotes.views.release_notes, name='firefox.desktop.releasenotes'),
url(android_releasenotes_re, bedrock.releasenotes.views.release_notes,
{'product': 'Firefox for Android'}, name='firefox.android.releasenotes'),
url(ios_releasenotes_re, bedrock.releasenotes.views.release_notes,
{'product': 'Firefox for iOS'}, name='firefox.ios.releasenotes'),
url(sysreq_re, bedrock.releasenotes.views.system_requirements,
name='firefox.system_requirements'),
url(android_sysreq_re, bedrock.releasenotes.views.system_requirements,
{'product': 'Firefox for Android'}, name='firefox.android.system_requirements'),
url(ios_sysreq_re, bedrock.releasenotes.views.system_requirements,
{'product': 'Firefox for iOS'}, name='firefox.ios.system_requirements'),
url('^firefox/releases/$', bedrock.releasenotes.views.releases_index,
{'product': 'Firefox'}, name='firefox.releases.index'),
url('^firefox/stub_attribution_code/$', views.stub_attribution_code,
name='firefox.stub_attribution_code'),
url(r'^firefox/welcome/1/$', views.firefox_welcome_page1, name='firefox.welcome.page1'),
page('firefox/welcome/2', 'firefox/welcome/page2.html'),
page('firefox/welcome/3', 'firefox/welcome/page3.html'),
page('firefox/welcome/4', 'firefox/welcome/page4.html'),
page('firefox/welcome/5', 'firefox/welcome/page5.html'),
page('firefox/switch', 'firefox/switch.html'),
page('firefox/pocket', 'firefox/pocket.html'),
# Bug 1519084
page('firefox/dedicated-profiles', 'firefox/dedicated-profiles.html'),
# Issue 6178
page('firefox/this-browser-comes-highly-recommended', 'firefox/recommended.html'),
# Issue 6604, SEO firefox/new pages
page('firefox/windows', 'firefox/new/scene1_windows.html'),
page('firefox/mac', 'firefox/new/scene1_mac.html'),
page('firefox/linux', 'firefox/new/scene1_linux.html'),
page('firefox/windows-64-bit', 'firefox/windows-64-bit.html'),
page('firefox/enterprise/sla', 'firefox/enterprise/sla.html'),
page('firefox/features/safebrowser', 'firefox/features/safebrowser.html'),
page('firefox/best-browser', 'firefox/best-browser.html'),
page('firefox/browsers/compare', 'firefox/compare/index.html'),
page('firefox/browsers/compare/chrome', 'firefox/compare/chrome.html'),
# Lockwise
page('firefox/lockwise', 'firefox/lockwise/lockwise.html'),
# Issue 7765, 7709
page('firefox/privacy', 'firefox/privacy/index.html'),
page('firefox/privacy/products', 'firefox/privacy/products.html'),
# Issue 8432
page('firefox/set-as-default/thanks', 'firefox/set-as-default/thanks.html'),
# Default browser campaign
page('firefox/set-as-default', 'firefox/set-as-default/landing-page.html')
)
| mpl-2.0 |
emonty/ansible | hacking/fix_test_syntax.py | 135 | 3563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2017, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Purpose:
# The purpose of this script is to convert uses of tests as filters to proper jinja test syntax
# as part of https://github.com/ansible/proposals/issues/83
# Notes:
# This script is imperfect, but was close enough to "fix" all integration tests
# with the exception of:
#
# 1. One file needed manual remediation, where \\\\ was ultimately replaced with \\ in 8 locations.
# 2. Multiple filter pipeline is unsupported. Example:
# var|string|search('foo')
# Which should be converted to:
# var|string is search('foo')
import argparse
import os
import re
from ansible.plugins.test import core, files, mathstuff
TESTS = list(core.TestModule().tests().keys()) + list(files.TestModule().tests().keys()) + list(mathstuff.TestModule().tests().keys())
TEST_MAP = {
'version_compare': 'version',
'is_dir': 'directory',
'is_file': 'file',
'is_link': 'link',
'is_abs': 'abs',
'is_same_file': 'same_file',
'is_mount': 'mount',
'issubset': 'subset',
'issuperset': 'superset',
'isnan': 'nan',
'succeeded': 'successful',
'success': 'successful',
'change': 'changed',
'skip': 'skipped',
}
FILTER_RE = re.compile(r'((.+?)\s*([\w \.\'"]+)(\s*)\|(\s*)(\w+))')
NOT_RE = re.compile(r'( ?)not ')
ASSERT_SPACE_RE = re.compile(r'- ([\'"])\s+')
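# Illustrative conversions this script performs (given the regexes and
# TEST_MAP above; the sample lines are hypothetical playbook fragments):
#   'when: result|succeeded'    -> 'when: result is successful'
#   'when: not result|skipped'  -> 'when: result is not skipped'
#   'when: path|is_dir'         -> 'when: path is directory'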
parser = argparse.ArgumentParser()
parser.add_argument(
'path',
help='Path to a directory that will be recursively walked. All .yml and .yaml files will be evaluated, '
'and uses of tests as filters will be converted to proper jinja test syntax.'
)
args = parser.parse_args()
for root, dirs, filenames in os.walk(args.path):
for name in filenames:
if os.path.splitext(name)[1] not in ('.yml', '.yaml'):
continue
path = os.path.join(root, name)
print(path)
with open(path) as f:
text = f.read()
for match in FILTER_RE.findall(text):
filter_name = match[5]
is_not = match[2].strip(' "\'').startswith('not ')
try:
test_name = TEST_MAP[filter_name]
except KeyError:
test_name = filter_name
if test_name not in TESTS:
continue
if is_not:
before = NOT_RE.sub(r'\1', match[2]).rstrip()
text = re.sub(
re.escape(match[0]),
'%s %s is not %s' % (match[1], before, test_name,),
text
)
else:
text = re.sub(
re.escape(match[0]),
'%s %s is %s' % (match[1], match[2].rstrip(), test_name,),
text
)
with open(path, 'w+') as f:
f.write(text)
| gpl-3.0 |
open-pli/enigma2 | lib/python/Components/ActionMap.py | 45 | 2560 | from enigma import eActionMap
class ActionMap:
def __init__(self, contexts = [ ], actions = { }, prio=0):
self.actions = actions
self.contexts = contexts
self.prio = prio
self.p = eActionMap.getInstance()
self.bound = False
self.exec_active = False
self.enabled = True
def setEnabled(self, enabled):
self.enabled = enabled
self.checkBind()
def doBind(self):
if not self.bound:
for ctx in self.contexts:
self.p.bindAction(ctx, self.prio, self.action)
self.bound = True
def doUnbind(self):
if self.bound:
for ctx in self.contexts:
self.p.unbindAction(ctx, self.action)
self.bound = False
def checkBind(self):
if self.exec_active and self.enabled:
self.doBind()
else:
self.doUnbind()
def execBegin(self):
self.exec_active = True
self.checkBind()
def execEnd(self):
self.exec_active = False
self.checkBind()
def action(self, context, action):
print " ".join(("action -> ", context, action))
if self.actions.has_key(action):
res = self.actions[action]()
if res is not None:
return res
return 1
else:
print "unknown action %s/%s! typo in keymap?" % (context, action)
return 0
def destroy(self):
pass
class NumberActionMap(ActionMap):
def action(self, contexts, action):
numbers = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
if (action in numbers and self.actions.has_key(action)):
res = self.actions[action](int(action))
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class HelpableActionMap(ActionMap):
"""An Actionmap which automatically puts the actions into the helpList.
Note that you can only use ONE context here!"""
# sorry for this complicated code.
# it's not more than converting a "documented" actionmap
# (where the values are possibly (function, help)-tuples)
# into a "classic" actionmap, where values are just functions.
# the classic actionmap is then passed to the ActionMap constructor,
# the collected helpstrings (with correct context, action) is
# added to the screen's "helpList", which will be picked up by
# the "HelpableScreen".
def __init__(self, parent, context, actions = { }, prio=0):
alist = [ ]
adict = { }
for (action, funchelp) in actions.iteritems():
# check if this is a tuple
if isinstance(funchelp, tuple):
alist.append((action, funchelp[1]))
adict[action] = funchelp[0]
else:
adict[action] = funchelp
ActionMap.__init__(self, [context], adict, prio)
parent.helpList.append((self, context, alist))
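# Usage sketch (illustrative; the context name, keys and help texts are
# examples): values may be plain callables or (callable, helptext) tuples.
#
# HelpableActionMap(self, "OkCancelActions",
#     {
#         "ok": (self.keyOk, _("Confirm selection")),
#         "cancel": self.keyCancel,  # plain function: no help entry
#     }, prio=0)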
| gpl-2.0 |
gurneyalex/OpenUpgrade | addons/mail/tests/test_mail_message.py | 38 | 27445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailMail(TestMail):
def test_00_partner_find_from_email(self):
""" Tests designed for partner fetch based on emails. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 1 - Partner ARaoul
p_a_id = self.res_partner.create(cr, uid, {'name': 'ARaoul', 'email': '[email protected]'})
# --------------------------------------------------
# CASE1: without object
# --------------------------------------------------
# Do: find partner with email -> first partner should be found
partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <[email protected]>'], link_mail=False)[0]
self.assertEqual(partner_info['full_name'], 'Maybe Raoul <[email protected]>',
'mail_thread: message_partner_info_from_emails did not handle email')
self.assertEqual(partner_info['partner_id'], p_a_id,
'mail_thread: message_partner_info_from_emails wrong partner found')
# Data: add some data about partners
# 2 - User BRaoul
p_b_id = self.res_partner.create(cr, uid, {'name': 'BRaoul', 'email': '[email protected]', 'user_ids': [(4, user_raoul.id)]})
# Do: find partner with email -> first user should be found
partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <[email protected]>'], link_mail=False)[0]
self.assertEqual(partner_info['partner_id'], p_b_id,
'mail_thread: message_partner_info_from_emails wrong partner found')
# --------------------------------------------------
# CASE1: with object
# --------------------------------------------------
# Do: find partner in group where there is a follower with the email -> should be taken
self.mail_group.message_subscribe(cr, uid, [group_pigs.id], [p_b_id])
partner_info = self.mail_group.message_partner_info_from_emails(cr, uid, group_pigs.id, ['Maybe Raoul <[email protected]>'], link_mail=False)[0]
self.assertEqual(partner_info['partner_id'], p_b_id,
'mail_thread: message_partner_info_from_emails wrong partner found')
class TestMailMessage(TestMail):
def test_00_mail_message_values(self):
""" Tests designed for testing email values based on mail.message, aliases, ... """
cr, uid, user_raoul_id = self.cr, self.uid, self.user_raoul_id
# Data: update + generic variables
reply_to1 = '[email protected]'
reply_to2 = '[email protected]'
email_from1 = '[email protected]'
alias_domain = 'schlouby.fr'
raoul_from = 'Raoul Grosbedon <[email protected]>'
raoul_from_alias = 'Raoul Grosbedon <[email protected]>'
raoul_reply = '"Followers of Pigs" <[email protected]>'
raoul_reply_alias = '"Followers of Pigs" <[email protected]>'
# --------------------------------------------------
# Case1: without alias_domain
# --------------------------------------------------
param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
# Do: free message; specified values > default values
msg_id = self.mail_message.create(cr, user_raoul_id, {'reply_to': reply_to1, 'email_from': email_from1})
msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
# Test: message content
self.assertIn('reply_to', msg.message_id,
'mail_message: message_id should be specific to a mail_message with a given reply_to')
self.assertEqual(msg.reply_to, reply_to1,
'mail_message: incorrect reply_to: should come from values')
self.assertEqual(msg.email_from, email_from1,
'mail_message: incorrect email_from: should come from values')
# Do: create a mail_mail with the previous mail_message + specified reply_to
mail_id = self.mail_mail.create(cr, user_raoul_id, {'mail_message_id': msg_id, 'state': 'cancel', 'reply_to': reply_to2})
mail = self.mail_mail.browse(cr, user_raoul_id, mail_id)
# Test: mail_mail content
self.assertEqual(mail.reply_to, reply_to2,
'mail_mail: incorrect reply_to: should come from values')
self.assertEqual(mail.email_from, email_from1,
'mail_mail: incorrect email_from: should come from mail.message')
# Do: mail_message attached to a document
msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
# Test: message content
self.assertIn('mail.group', msg.message_id,
'mail_message: message_id should contain model')
self.assertIn('%s' % self.group_pigs_id, msg.message_id,
'mail_message: message_id should contain res_id')
self.assertEqual(msg.reply_to, raoul_reply,
'mail_message: incorrect reply_to: should be Raoul')
self.assertEqual(msg.email_from, raoul_from,
'mail_message: incorrect email_from: should be Raoul')
# --------------------------------------------------
# Case2: with alias_domain, without catchall alias
# --------------------------------------------------
self.registry('ir.config_parameter').set_param(cr, uid, 'mail.catchall.domain', alias_domain)
self.registry('ir.config_parameter').unlink(cr, uid, self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.alias')]))
# Update message
msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
# Test: generated reply_to
self.assertEqual(msg.reply_to, raoul_reply_alias,
'mail_mail: incorrect reply_to: should be Pigs alias')
# Update message: test alias on email_from
msg_id = self.mail_message.create(cr, user_raoul_id, {})
msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
# Test: generated reply_to
self.assertEqual(msg.reply_to, raoul_from_alias,
'mail_mail: incorrect reply_to: should be message email_from using Raoul alias')
# --------------------------------------------------
# Case2: with alias_domain and catchall alias
# --------------------------------------------------
self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.alias', 'gateway')
# Update message
msg_id = self.mail_message.create(cr, user_raoul_id, {})
msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
# Test: generated reply_to
self.assertEqual(msg.reply_to, '[email protected]',
'mail_mail: reply_to should equal the catchall email alias')
# Do: create a mail_mail
mail_id = self.mail_mail.create(cr, uid, {'state': 'cancel', 'reply_to': '[email protected]'})
mail = self.mail_mail.browse(cr, uid, mail_id)
# Test: mail_mail content
self.assertEqual(mail.reply_to, '[email protected]',
'mail_mail: reply_to should equal the reply_to given to create')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_10_mail_message_search_access_rights(self):
""" Testing mail_message.search() using specific _search implementation """
cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id
# Data: comment subtype for mail.message creation
ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
subtype_id = ref and ref[1] or False
# Data: Birds group, private
group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})
# Data: Raoul is member of Pigs
self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])
# Data: various author_ids, partner_ids, documents
msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})
msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})
msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})
msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})
msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
# Test: Bert: 2 messages that have Bert in partner_ids
msg_ids = self.mail_message.search(cr, self.user_bert_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
# Test: Admin: all messages
msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_15_mail_message_check_access_rule(self):
""" Testing mail_message.check_access_rule() """
cr, uid = self.cr, self.uid
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare groups: Pigs (employee), Jobs (public)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')
# prepare an attachment
attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})
# ----------------------------------------
# CASE1: read
# ----------------------------------------
# Do: create a new mail.message
message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is pushed to Bert
notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})
# Test: Bert reads the message, ok because notification pushed
self.mail_message.read(cr, user_bert_id, message_id)
# Test: Bert downloads attachment, ok because he can read message
self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
# Do: remove notification
self.mail_notification.unlink(cr, uid, notif_id)
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, self.user_bert_id, message_id)
# Test: Bert downloads attachment, crash because he can't read message
self.assertRaises(except_orm, self.mail_message.download_attachment,
cr, user_bert_id, message_id, attachment_id)
# Do: Bert is now the author
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})
# Test: Bert reads the message, ok because Bert is the author
self.mail_message.read(cr, user_bert_id, message_id)
# Do: Bert is not the author anymore
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is attached to a document Bert can read, Jobs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})
# Test: Bert reads the message, ok because linked to a doc he is allowed to read
self.mail_message.read(cr, user_bert_id, message_id)
# Do: message is attached to a document Bert cannot read, Pigs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# ----------------------------------------
# CASE2: create
# ----------------------------------------
# Do: Bert creates a message on Pigs -> ko, no creation rights
self.assertRaises(AccessError, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})
# Do: Bert create a message on Jobs -> ko, no creation rights
self.assertRaises(AccessError, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Bert create a private message -> ko, no creation rights
self.assertRaises(AccessError, self.mail_message.create,
cr, user_bert_id, {'body': 'Test'})
# Do: Raoul creates a message on Jobs -> ok, write access to the related document
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Raoul creates a message on Priv -> ko, no write access to the related document
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})
# Do: Raoul creates a private message -> ok
self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})
# Do: Raoul creates a reply to a message on Priv -> ko
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
# Do: Raoul creates a reply to a message on Priv-> ok if has received parent
self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
def test_20_message_set_star(self):
""" Tests for starring messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin stars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg.starred, 'mail_message starred failed')
# Do: Raoul stars msg
self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
# Do: Admin unstars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unstarred for Admin, starred for Raoul
self.assertFalse(msg.starred, 'mail_message starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
def test_30_message_set_read(self):
""" Tests for reading messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin reads msg
self.mail_message.set_message_read(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification read failed')
self.assertFalse(msg.to_read, 'mail_message read failed')
# Do: Raoul reads msg
self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification starred failed')
self.assertFalse(msg_raoul.to_read, 'mail_message starred failed')
# Do: Admin unreads msg
self.mail_message.set_message_read(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unread for Admin, read for Raoul
self.assertTrue(msg.to_read, 'mail_message read failed')
self.assertFalse(msg_raoul.to_read, 'mail_message read failed')
def test_40_message_vote(self):
""" Test designed for the vote/unvote feature. """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin vote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
# Test: msg has Admin as voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voters')
# Do: Raoul votes for msg
self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])
msg_raoul.refresh()
# Test: msg has Admin and Raoul as voters
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Raoul should be in the voters')
# Do: Admin unvote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
msg_raoul.refresh()
# Test: msg has Raoul as the only remaining voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, only Raoul should be in the voters')
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, only Raoul should be in the voters')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_50_mail_flow_access_rights(self):
""" Test a Chatter-looks alike flow to test access rights """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare groups: Pigs (employee), Jobs (public)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[self.partner_admin_id])
jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[self.partner_admin_id])
# ----------------------------------------
# CASE1: Bert, without groups
# ----------------------------------------
# Do: Bert reads Jobs basic fields, ok because public = read access on the group
self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['name', 'description'])
# Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages
jobs_message_ids = self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['message_ids'])['message_ids']
self.mail_message.read(cr, user_bert_id, jobs_message_ids)
# Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)
bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)
trigger_read = bert_jobs.name
for message in bert_jobs.message_ids:
trigger_read = message.subject
for partner in bert_jobs.message_follower_ids:
with self.assertRaises(AccessError):
trigger_read = partner.name
# Do: Bert comments Jobs, ko because no creation right
self.assertRaises(AccessError,
self.mail_group.message_post,
cr, user_bert_id, self.group_jobs_id, body='I love Pigs')
# Do: Bert writes on its own profile, ko because no message create access
with self.assertRaises(AccessError):
self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')
self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')
# ----------------------------------------
# CASE2: Raoul, employee
# ----------------------------------------
# Do: Raoul browses Jobs -> ok, ok for message_ids, of for message_follower_ids
raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)
trigger_read = raoul_jobs.name
for message in raoul_jobs.message_ids:
trigger_read = message.subject
for partner in raoul_jobs.message_follower_ids:
trigger_read = partner.name
# Do: Raoul comments Jobs, ok
self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')
# Do: Raoul create a mail.compose.message record on Jobs, because he uses the wizard
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
# Do: Raoul replies to a Jobs message using the composer
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text'},
{'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
| agpl-3.0 |
tseaver/google-cloud-python | logging/tests/unit/handlers/test_handlers.py | 2 | 4970 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
class TestCloudLoggingHandler(unittest.TestCase):
PROJECT = "PROJECT"
@staticmethod
def _get_target_class():
from google.cloud.logging.handlers.handlers import CloudLoggingHandler
return CloudLoggingHandler
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
import sys
from google.cloud.logging.logger import _GLOBAL_RESOURCE
from google.cloud.logging.handlers.handlers import DEFAULT_LOGGER_NAME
client = _Client(self.PROJECT)
handler = self._make_one(client, transport=_Transport)
self.assertEqual(handler.name, DEFAULT_LOGGER_NAME)
self.assertIs(handler.client, client)
self.assertIsInstance(handler.transport, _Transport)
self.assertIs(handler.transport.client, client)
self.assertEqual(handler.transport.name, DEFAULT_LOGGER_NAME)
self.assertIs(handler.resource, _GLOBAL_RESOURCE)
self.assertIsNone(handler.labels)
self.assertIs(handler.stream, sys.stderr)
def test_ctor_explicit(self):
import io
from google.cloud.logging.resource import Resource
resource = Resource("resource_type", {"resource_label": "value"})
labels = {"handler_lable": "value"}
name = "test-logger"
client = _Client(self.PROJECT)
stream = io.BytesIO()
handler = self._make_one(
client,
name=name,
transport=_Transport,
resource=resource,
labels=labels,
stream=stream,
)
self.assertEqual(handler.name, name)
self.assertIs(handler.client, client)
self.assertIsInstance(handler.transport, _Transport)
self.assertIs(handler.transport.client, client)
self.assertEqual(handler.transport.name, name)
self.assertIs(handler.resource, resource)
self.assertEqual(handler.labels, labels)
self.assertIs(handler.stream, stream)
def test_emit(self):
from google.cloud.logging.logger import _GLOBAL_RESOURCE
client = _Client(self.PROJECT)
handler = self._make_one(
client, transport=_Transport, resource=_GLOBAL_RESOURCE
)
logname = "loggername"
message = "hello world"
record = logging.LogRecord(logname, logging.INFO, None, None, message, None, None)
handler.emit(record)
self.assertEqual(
handler.transport.send_called_with,
(record, message, _GLOBAL_RESOURCE, None),
)
class TestSetupLogging(unittest.TestCase):
def _call_fut(self, handler, excludes=None):
from google.cloud.logging.handlers.handlers import setup_logging
if excludes:
return setup_logging(handler, excluded_loggers=excludes)
else:
return setup_logging(handler)
def test_setup_logging(self):
handler = _Handler(logging.INFO)
self._call_fut(handler)
root_handlers = logging.getLogger().handlers
self.assertIn(handler, root_handlers)
def test_setup_logging_excludes(self):
INCLUDED_LOGGER_NAME = "includeme"
EXCLUDED_LOGGER_NAME = "excludeme"
handler = _Handler(logging.INFO)
self._call_fut(handler, (EXCLUDED_LOGGER_NAME,))
included_logger = logging.getLogger(INCLUDED_LOGGER_NAME)
self.assertTrue(included_logger.propagate)
excluded_logger = logging.getLogger(EXCLUDED_LOGGER_NAME)
self.assertNotIn(handler, excluded_logger.handlers)
self.assertFalse(excluded_logger.propagate)
def setUp(self):
self._handlers_cache = logging.getLogger().handlers[:]
def tearDown(self):
# cleanup handlers
logging.getLogger().handlers = self._handlers_cache[:]
class _Handler(object):
def __init__(self, level):
self.level = level
def acquire(self):
pass # pragma: NO COVER
def release(self):
pass # pragma: NO COVER
class _Client(object):
def __init__(self, project):
self.project = project
class _Transport(object):
def __init__(self, client, name):
self.client = client
self.name = name
def send(self, record, message, resource, labels=None):
self.send_called_with = (record, message, resource, labels)
| apache-2.0 |
timthelion/FreeCAD | src/Mod/Ship/shipCreateTank/TaskPanel.py | 8 | 6620 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2016 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
import Units
from PySide import QtGui, QtCore
import Tools
import TankInstance as Instance
from shipUtils import Paths
import shipUtils.Units as USys
class TaskPanel:
def __init__(self):
"""Constructor"""
self.ui = Paths.modulePath() + "/shipCreateTank/TaskPanel.ui"
def accept(self):
"""Create the ship instance"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.ship = self.widget(QtGui.QComboBox, "Ship")
ship = self.ships[form.ship.currentIndex()]
Tools.createTank(self.solids, ship)
return True
def reject(self):
"""Cancel the job"""
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
"""Create and configurate the user interface"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.ship = self.widget(QtGui.QComboBox, "Ship")
self.form = form
if self.initValues():
return True
self.retranslateUi()
def getMainWindow(self):
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise Exception("No main window found")
def widget(self, class_id, name):
"""Return the selected widget.
Keyword arguments:
class_id -- Class identifier
name -- Name of the widget
"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
return form.findChild(class_id, name)
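    # Example (sketch): self.widget(QtGui.QComboBox, "Ship") returns the
    # ship selector combo box that this panel configures in initValues().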
def initValues(self):
"""Setup the initial values"""
        # Ensure that there is at least one valid object to generate the
        # tank
selObjs = Gui.Selection.getSelection()
self.solids = []
if not selObjs:
msg = QtGui.QApplication.translate(
"ship_tank",
"Tanks objects can only be created on top of its geometry"
" (no objects selected)",
None,
QtGui.QApplication.UnicodeUTF8)
App.Console.PrintError(msg + '\n')
return True
for obj in selObjs:
try:
self.solids.extend(obj.Shape.Solids)
except:
continue
if not len(self.solids):
msg = QtGui.QApplication.translate(
"ship_tank",
"No solids found in the selected objects",
None,
QtGui.QApplication.UnicodeUTF8)
App.Console.PrintError(msg + '\n')
return True
        # Ensure as well that at least one valid ship exists to create the
        # entity inside it
self.ships = []
for obj in App.ActiveDocument.Objects:
try:
if obj.IsShip:
self.ships.append(obj)
except:
continue
if not len(self.ships):
msg = QtGui.QApplication.translate(
"ship_tank",
"There are not ship objects to create weights into them",
None,
QtGui.QApplication.UnicodeUTF8)
App.Console.PrintError(msg + '\n')
return True
# Fill the ships combo box
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.ship = self.widget(QtGui.QComboBox, "Ship")
icon = QtGui.QIcon(QtGui.QPixmap(":/icons/Ship_Instance.svg"))
form.ship.clear()
for ship in self.ships:
form.ship.addItem(icon, ship.Label)
form.ship.setCurrentIndex(0)
return False
def retranslateUi(self):
"""Set the user interface locale strings."""
self.form.setWindowTitle(QtGui.QApplication.translate(
"ship_tank",
"Create a new tank",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "ShipLabel").setText(
QtGui.QApplication.translate(
"ship_tank",
"Ship",
None,
QtGui.QApplication.UnicodeUTF8))
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
| lgpl-2.1 |
gilneidp/FinalProject | ALL_FILES/pox/misc/mac_blocker.py | 46 | 3794 | # Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gives a GUI for blocking individual MAC addresses.
Meant to work with reactive components like l2_learning or l2_pairs.
Start with --no-clear-tables if you don't want to clear tables on changes.
"""
from pox.core import core
from pox.lib.revent import EventHalt
from pox.lib.addresses import EthAddr
import pox.openflow.libopenflow_01 as of
from Tkinter import *
# Sets of blocked and unblocked MACs
blocked = set()
unblocked = set()
# Listbox widgets
unblocked_list = None
blocked_list = None
# If True, clear tables on every block/unblock
clear_tables_on_change = True
def add_mac (mac):
if mac.is_multicast: return
if mac.is_bridge_filtered: return
if mac in blocked: return
if mac in unblocked: return
unblocked.add(mac)
core.tk.do(unblocked_list.insert, None, END, str(mac))
def packet_handler (event):
# Note the two MACs
add_mac(event.parsed.src)
add_mac(event.parsed.dst)
# Check for blocked MACs
if event.parsed.src in blocked:
return EventHalt
if event.parsed.dst in blocked:
return EventHalt
def get (l):
""" Get an element from a listbox """
try:
i = l.curselection()[0]
mac = l.get(i)
return i,mac
except:
pass
return None,None
def clear_flows ():
""" Clear flows on all switches """
for c in core.openflow.connections:
d = of.ofp_flow_mod(command = of.OFPFC_DELETE)
c.send(d)
def move_entry (from_list, from_set, to_list, to_set):
""" Move entry from one list to another """
i,mac = get(from_list)
if mac is None: return
from_list.delete(i)
to_list.insert(END, mac)
mac = EthAddr(mac)
to_set.add(mac)
from_set.remove(mac)
if clear_tables_on_change:
# This is coming from another thread, so don't just send -- use
# callLater so that it happens from the coop thread.
core.callLater(clear_flows)
def do_block ():
""" Handle clicks on block button """
move_entry(unblocked_list, unblocked, blocked_list, blocked)
def do_unblock ():
""" Handle clicks on unblock button """
move_entry(blocked_list, blocked, unblocked_list, unblocked)
def setup ():
""" Set up GUI """
global unblocked_list, blocked_list
top = Toplevel()
top.title("MAC Blocker")
# Shut down POX when window is closed
top.protocol("WM_DELETE_WINDOW", core.quit)
box1 = Frame(top)
box2 = Frame(top)
l1 = Label(box1, text="Allowed")
l2 = Label(box2, text="Blocked")
unblocked_list = Listbox(box1)
blocked_list = Listbox(box2)
l1.pack()
l2.pack()
unblocked_list.pack(expand=True,fill=BOTH)
blocked_list.pack(expand=True,fill=BOTH)
buttons = Frame(top)
block_button = Button(buttons, text="Block >>", command=do_block)
unblock_button = Button(buttons, text="<< Unblock", command=do_unblock)
block_button.pack()
unblock_button.pack()
opts = {"side":LEFT,"fill":BOTH,"expand":True}
box1.pack(**opts)
buttons.pack(**{"side":LEFT})
box2.pack(**opts)
core.getLogger().debug("Ready")
def launch (no_clear_tables = False):
global clear_tables_on_change
clear_tables_on_change = not no_clear_tables
def start ():
core.openflow.addListenerByName("PacketIn",packet_handler,priority=1)
core.tk.do(setup)
core.call_when_ready(start, ['openflow','tk'])
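# Example launch (sketch; the forwarding component name is an assumption --
# any reactive L2 component mentioned in the docstring works, and this module
# additionally needs the 'openflow' and 'tk' components):
#   ./pox.py tk forwarding.l2_learning misc.mac_blocker
#   ./pox.py tk forwarding.l2_learning misc.mac_blocker --no-clear-tables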
| mit |
magicrub/MissionPlanner | Lib/encodings/mbcs.py | 103 | 1258 | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
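# Example (sketch; only meaningful on Windows, where 'mbcs' wraps the active
# ANSI code page, so the exact bytes depend on the system locale):
#   u'caf\xe9'.encode('mbcs')   # encodes via the current code page
#   b.decode('mbcs')            # inverse mapping for bytes b from that page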
| gpl-3.0 |
neumerance/deploy | .venv/lib/python2.7/site-packages/requests/packages/urllib3/contrib/ntlmpool.py | 59 | 4740 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
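# Example usage (sketch; host and URL values are illustrative only):
#   pool = NTLMConnectionPool('EXAMPLE\\jdoe', 'secret',
#                             authurl='/protected/', host='server.example.com')
#   response = pool.urlopen('GET', '/protected/resource')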
| apache-2.0 |
drdelta/notenso | src/platform/win32/SConsTools/SwigTool.py | 7 | 5611 | import os
import re
import SCons.Builder
import SCons.Scanner
from Helpers import addInstanceMethodToEnv
def buildSwigExtension( env,
swigInterfaceFile,
source = None,
isCpp = True,
**kwargs ):
"""
Builds a SWIG extension by calling a SwigC/SwigCpp builder
method and then a SharedLibrary builder method.
"""
if isCpp:
# We need to dynamically determine swigWrapper and pyFile
# because the returned targets may contain a variable
# number of files--if directors are enabled.
files = env.SwigCpp( source=swigInterfaceFile )
swigWrapper = [ f for f in files
if f.path.endswith( ".cxx" ) ][0]
pyFile = [ f for f in files
if f.path.endswith( ".py" ) ][0]
else:
swigWrapper, pyFile = env.SwigC( source=swigInterfaceFile )
sourceList = [swigWrapper]
if source:
sourceList.append( source )
# If our SWIG interface file is "foo.i", our target file will
# be "_foo".
fileName = os.path.basename( swigInterfaceFile )
targetFileName = "_%s" % os.path.splitext( fileName )[0]
pydFile, libFile, expFile = env.SharedLibrary(
target=targetFileName,
source=sourceList,
**kwargs
)
return [pydFile, pyFile]
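# Example SConscript usage (sketch; assumes generate() below has installed
# buildSwigExtension on the environment via addInstanceMethodToEnv):
#   pydFile, pyFile = env.buildSwigExtension( "foo.i",
#                                             source=["foo_helpers.cpp"] )
#   # produces _foo (via SharedLibrary) plus the SWIG-generated foo.py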
# ----------------------------------------------------------------------------
# SWIG Builders and Scanner
# ----------------------------------------------------------------------------
# SWIG Builders
def swigBuilderModifyTargets( target, source, env ):
"""
Emitter for the Swig Builder.
"""
# Assign param to dummy variable to ensure that pychecker
# doesn't complain.
_ = env
for i in source:
name = str( i )[:-2]
# If directors are enabled, then add the "*_wrap.h" file as a
# target.
text = i.get_contents()
if text.find( "\"director\"" ) != -1:
target.append( "%s_wrap.h" % name )
# Add the "*.py" file as a target.
target.append( "%s.py" % name )
return target, source
def swigBuilderGenerator( source, target, env, for_signature ):
"""
Generator for the Swig Builder.
"""
# Assign param to dummy variable to ensure that pychecker
# doesn't complain.
_ = for_signature
import os.path
sourceFile = str(source[0])
targetFile = str(target[0])
dirName = os.path.dirname( sourceFile )
if len( dirName ) == 0:
dirName = "."
if targetFile.endswith( ".cxx" ):
cmdStr = "${SWIG} -c++"
else:
cmdStr = "${SWIG}"
# Read the environment's CPPPATH and turn that into the Swig
# include path.
if env.has_key( "CPPPATH" ):
for includeDirName in env["CPPPATH"]:
# Expand out those variables and "#" characters.
includeDirName = env.Dir( env.subst(includeDirName) ).path
cmdStr += ' "-I%s"' % includeDirName
cmdStr += " -Werror -outdir %s -python %s"
finalCmd = cmdStr % ( dirName, sourceFile )
return finalCmd
swigCBuilder = SCons.Builder.Builder(
generator = swigBuilderGenerator,
suffix = "_wrap.c",
src_suffix = ".i",
emitter = swigBuilderModifyTargets
)
swigCppBuilder = SCons.Builder.Builder(
generator = swigBuilderGenerator,
suffix = "_wrap.cxx",
src_suffix = ".i",
emitter = swigBuilderModifyTargets
)
# SWIG Scanner
swigInterfaceFileRe = re.compile( r'%include\s+"(.*)"' )
def swigInterfaceFileScan( node, env, path, arg = None ):
"""
Main function for Swig interface (.i) file Scanner.
"""
# Assign param to dummy variable to ensure that pychecker
# doesn't complain.
_ = arg
contents = node.get_contents()
includedFiles = swigInterfaceFileRe.findall( contents )
implicitDependencies = [ fileName for fileName in includedFiles
if fileName.endswith( ".h" ) ]
theFiles = []
for fileName in implicitDependencies:
pathFound = False
for dirName in path:
relPath = env.Dir( dirName ).abspath
filePath = os.path.join( relPath, fileName )
if os.path.exists( filePath ):
theFiles.append( filePath )
pathFound = True
break
if not pathFound:
raise Exception( "Dependency '%s' mentioned in '%s' not found." %
(fileName, node.path) )
return theFiles
def swigInterfaceFilePath( env, node, unknown1, unknown2 ):
"""
Path function for Swig interface (.i) file Scanner.
"""
# Assign params to dummy variables to ensure that pychecker
# doesn't complain.
_, _ = unknown1, unknown2
return tuple( [node.path] + env["CPPPATH"] )
swigInterfaceFileScanner = SCons.Scanner.Scanner(
function = swigInterfaceFileScan,
path_function = swigInterfaceFilePath,
skeys = [".i"]
)
def generate( env ):
# Add the Builders and Scanner to the environment.
env.Append(
BUILDERS = { "SwigC" : swigCBuilder,
"SwigCpp" : swigCppBuilder, },
SCANNERS = swigInterfaceFileScanner,
)
addInstanceMethodToEnv( env, buildSwigExtension )
def exists( env ):
if env.has_key( "SWIG" ):
return 1
else:
return 0
| bsd-3-clause |
Mohamed711/Quiz-Program | vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/v8/tools/testrunner/server/work_handler.py | 123 | 5569 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import SocketServer
import stat
import subprocess
import threading
from . import compression
from . import constants
from . import signatures
from ..network import endpoint
from ..objects import workpacket
class WorkHandler(SocketServer.BaseRequestHandler):
def handle(self):
rec = compression.Receiver(self.request)
while not rec.IsDone():
data = rec.Current()
with self.server.job_lock:
self._WorkOnWorkPacket(data)
rec.Advance()
def _WorkOnWorkPacket(self, data):
server_root = self.server.daemon.root
v8_root = os.path.join(server_root, "v8")
os.chdir(v8_root)
packet = workpacket.WorkPacket.Unpack(data)
self.ctx = packet.context
self.ctx.shell_dir = os.path.join("out",
"%s.%s" % (self.ctx.arch, self.ctx.mode))
if not os.path.isdir(self.ctx.shell_dir):
os.makedirs(self.ctx.shell_dir)
for binary in packet.binaries:
if not self._UnpackBinary(binary, packet.pubkey_fingerprint):
return
if not self._CheckoutRevision(packet.base_revision):
return
if not self._ApplyPatch(packet.patch):
return
tests = packet.tests
endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon)
self._SendResponse()
def _SendResponse(self, error_message=None):
try:
if error_message:
compression.Send([[-1, error_message]], self.request)
compression.Send(constants.END_OF_STREAM, self.request)
return
except Exception, e:
pass # Peer is gone. There's nothing we can do.
# Clean up.
self._Call("git checkout -f")
self._Call("git clean -f -d")
self._Call("rm -rf %s" % self.ctx.shell_dir)
def _UnpackBinary(self, binary, pubkey_fingerprint):
binary_name = binary["name"]
if binary_name == "libv8.so":
libdir = os.path.join(self.ctx.shell_dir, "lib.target")
if not os.path.exists(libdir): os.makedirs(libdir)
target = os.path.join(libdir, binary_name)
else:
target = os.path.join(self.ctx.shell_dir, binary_name)
pubkeyfile = "../trusted/%s.pem" % pubkey_fingerprint
if not signatures.VerifySignature(target, binary["blob"],
binary["sign"], pubkeyfile):
self._SendResponse("Signature verification failed")
return False
os.chmod(target, stat.S_IRWXU)
return True
def _CheckoutRevision(self, base_svn_revision):
get_hash_cmd = (
"git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'" %
base_svn_revision)
try:
base_revision = subprocess.check_output(get_hash_cmd, shell=True)
if not base_revision: raise ValueError
except:
self._Call("git fetch")
try:
base_revision = subprocess.check_output(get_hash_cmd, shell=True)
if not base_revision: raise ValueError
except:
self._SendResponse("Base revision not found.")
return False
code = self._Call("git checkout -f %s" % base_revision)
if code != 0:
self._SendResponse("Error trying to check out base revision.")
return False
code = self._Call("git clean -f -d")
if code != 0:
self._SendResponse("Failed to reset checkout")
return False
return True
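  # Example of the lookup above (sketch): for base_svn_revision 12345 the
  # first command expands to
  #   git log -1 --format=%H --remotes --grep='^git-svn-id:.*@12345'
  # i.e. it maps the SVN revision onto the matching git commit hash.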
def _ApplyPatch(self, patch):
if not patch: return True # Just skip if the patch is empty.
patchfilename = "_dtest_incoming_patch.patch"
with open(patchfilename, "w") as f:
f.write(patch)
code = self._Call("git apply %s" % patchfilename)
if code != 0:
self._SendResponse("Error applying patch.")
return False
return True
def _Call(self, cmd):
return subprocess.call(cmd, shell=True)
class WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, daemon):
address = (daemon.ip, constants.PEER_PORT)
SocketServer.TCPServer.__init__(self, address, WorkHandler)
self.job_lock = threading.Lock()
self.daemon = daemon
| cc0-1.0 |
jmartinm/invenio-master | modules/miscutil/lib/plotextractor.py | 13 | 53628 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
import os
import getopt
import re
import time
from invenio.shellutils import run_shell_command, Timeout, run_process_with_timeout
from invenio.invenio_connector import InvenioConnector
from invenio.textutils import wrap_text_in_a_box, \
wait_for_user
from invenio.config import CFG_TMPSHAREDDIR, CFG_SITE_URL, \
CFG_PLOTEXTRACTOR_DISALLOWED_TEX, \
CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT, \
CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT, \
CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
from invenio.bibtask import task_low_level_submission
from invenio.plotextractor_getter import get_list_of_all_matching_files, \
parse_and_download, \
make_single_directory, \
tarballs_by_recids, \
tarballs_by_arXiv_id
from invenio.plotextractor_converter import untar, extract_text, \
convert_images
from invenio.plotextractor_output_utils import assemble_caption, \
find_open_and_close_braces, \
create_MARC, get_tex_location, \
get_image_location, \
create_contextfiles, \
prepare_image_data, \
write_message, remove_dups
from tempfile import mkstemp
"""
This programme will take a tarball from arXiv, untar it, convert all its
associated images to PNG, find the captions to the images detailed in the
included TeX document, and write MARCXML that reflects these associations.
"""
ARXIV_HEADER = 'arXiv:'
PLOTS_DIR = 'plots'
MAIN_CAPTION_OR_IMAGE = 0
SUB_CAPTION_OR_IMAGE = 1
def main():
"""
The main program loop.
"""
help_param = 'help'
verbose_param = 'verbose'
tarball_param = 'tarball'
tardir_param = 'tdir'
infile_param = 'input'
sdir_param = 'sdir'
extract_text_param = 'extract-text'
force_param = 'force'
upload_param = 'call-bibupload'
upload_mode_param = 'upload-mode'
yes_i_know_param = 'yes-i-know'
recid_param = 'recid'
with_docname_param = 'with-docname'
with_doctype_param = 'with-doctype'
with_docformat_param = 'with-docformat'
arXiv_param = 'arXiv'
squash_param = 'squash'
refno_url_param = 'refno-url'
refno_param = 'skip-refno'
clean_param = 'clean'
param_abbrs = 'h:t:d:s:i:a:l:xfuyr:qck'
params = [help_param, tarball_param + '=', tardir_param + '=', \
sdir_param + '=', infile_param + '=', arXiv_param + '=', refno_url_param + '=', \
extract_text_param, force_param, upload_param, yes_i_know_param, recid_param + '=', \
squash_param, clean_param, refno_param, with_docname_param + '=', \
with_doctype_param + '=', with_docformat_param + '=', upload_mode_param + '=']
try:
opts, args = getopt.getopt(sys.argv[1:], param_abbrs, params)
except getopt.GetoptError, err:
write_message(str(err))
usage()
sys.exit(2)
tarball = None
sdir = None
infile = None
tdir = None
xtract_text = False
upload_plots = False
force = False
squash = False
squash_path = ""
yes_i_know = False
recids = None
with_docname = None
with_doctype = None
with_docformat = None
arXiv = None
clean = False
refno_url = CFG_SITE_URL
skip_refno = False
upload_mode = 'append'
for opt, arg in opts:
if opt in ['-h', '--' + help_param]:
usage()
sys.exit()
elif opt in ['-t', '--' + tarball_param]:
tarball = arg
elif opt in ['-d', '--' + tardir_param]:
tdir = arg
elif opt in ['-i', '--' + infile_param]:
infile = arg
elif opt in ['-r', '--' + recid_param]:
recids = arg
elif opt in ['-a', '--' + arXiv_param]:
arXiv = arg
elif opt in ['--' + with_docname_param]:
with_docname = arg
elif opt in ['--' + with_doctype_param]:
with_doctype = arg
elif opt in ['--' + with_docformat_param]:
with_docformat = arg
elif opt in ['-s', '--' + sdir_param]:
sdir = arg
elif opt in ['-x', '--' + extract_text_param]:
xtract_text = True
elif opt in ['-f', '--' + force_param]:
force = True
elif opt in ['-u', '--' + upload_param]:
upload_plots = True
elif opt in ['--' + upload_mode_param]:
upload_mode = arg
elif opt in ['-q', '--' + squash_param]:
squash = True
elif opt in ['-y', '--' + yes_i_know_param]:
yes_i_know = True
elif opt in ['-c', '--' + clean_param]:
clean = True
elif opt in ['-l', '--' + refno_url_param]:
refno_url = arg
elif opt in ['-k', '--' + refno_param]:
skip_refno = True
else:
usage()
sys.exit()
allowed_upload_modes = ('insert', 'append', 'correct', 'replace')
if not upload_mode in allowed_upload_modes:
write_message('Specified upload mode %s is not valid. Must be in %s' % \
(upload_mode, ', '.join(allowed_upload_modes)))
usage()
sys.exit()
if sdir == None:
sdir = CFG_TMPSHAREDDIR
elif not os.path.isdir(sdir):
try:
os.makedirs(sdir)
except:
            write_message('Error: cannot use this sdir; falling back to ' + \
                          'CFG_TMPSHAREDDIR')
sdir = CFG_TMPSHAREDDIR
if skip_refno:
refno_url = ""
tars_and_gzips = []
if tarball != None:
tars_and_gzips.append(tarball)
if tdir != None:
filetypes = ['gzip compressed', 'tar archive', 'Tar archive'] # FIXME
write_message('Currently processing any tarballs in ' + tdir)
tars_and_gzips.extend(get_list_of_all_matching_files(tdir, filetypes))
if infile != None:
tars_and_gzips.extend(parse_and_download(infile, sdir))
if recids != None:
tars_and_gzips.extend(tarballs_by_recids(recids, sdir, with_docname, with_doctype, with_docformat))
if arXiv != None:
tars_and_gzips.extend(tarballs_by_arXiv_id([arXiv], sdir))
if tars_and_gzips == []:
write_message('Error: no tarballs to process!')
sys.exit(1)
if squash:
squash_fd, squash_path = mkstemp(suffix="_" + time.strftime("%Y%m%d%H%M%S") + ".xml", \
prefix="plotextractor_", dir=sdir)
os.write(squash_fd, '<?xml version="1.0" encoding="UTF-8"?>\n<collection>\n')
os.close(squash_fd)
for tarball in tars_and_gzips:
recid = None
if isinstance(tarball, tuple):
tarball, recid = tarball
process_single(tarball, sdir=sdir, xtract_text=xtract_text, \
upload_plots=upload_plots, force=force, squash=squash_path, \
yes_i_know=yes_i_know, refno_url=refno_url, \
clean=clean, recid=recid, upload_mode=upload_mode)
if squash:
squash_fd = open(squash_path, "a")
squash_fd.write("</collection>\n")
squash_fd.close()
write_message("generated %s" % (squash_path,))
if upload_plots:
upload_to_site(squash_path, yes_i_know, upload_mode)
def process_single(tarball, sdir=CFG_TMPSHAREDDIR, xtract_text=False, \
upload_plots=False, force=False, squash="", \
yes_i_know=False, refno_url="", \
clean=False, recid=None, upload_mode='append'):
"""
Processes one tarball end-to-end.
@param: tarball (string): the absolute location of the tarball we wish
to process
@param: sdir (string): where we should put all the intermediate files for
the processing. if you're uploading, this directory should be one
of the ones specified in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, else
the upload won't work
@param: xtract_text (boolean): true iff you want to run pdftotext on the
pdf versions of the tarfiles. this programme assumes that the pdfs
are named the same as the tarballs but with a .pdf extension.
@param: upload_plots (boolean): true iff you want to bibupload the plots
extracted by this process
@param: force (boolean): force creation of new xml file
@param: squash: write MARCXML output into a specified 'squash' file
instead of single files.
@param: yes_i_know: if True, no user interaction if upload_plots is True
@param: refno_url: URL to the invenio-instance to query for refno.
@param: clean: if True, everything except the original tarball, plots and
context- files will be removed
@param recid: the record ID linked to this tarball. Overrides C{refno_url}
@param upload_mode: the mode in which to call bibupload (when C{upload_plots}
is set to True.
@return: marc_name(string): path to generated marcxml file
"""
sub_dir, refno = get_defaults(tarball, sdir, refno_url, recid)
if not squash:
marc_name = os.path.join(sub_dir, '%s.xml' % (refno,))
if (force or not os.path.exists(marc_name)):
marc_fd = open(marc_name, 'w')
marc_fd.write('<?xml version="1.0" encoding="UTF-8"?>\n<collection>\n')
marc_fd.close()
else:
marc_name = squash
if xtract_text:
extract_text(tarball)
try:
extracted_files_list, image_list, tex_files = untar(tarball, sub_dir)
except Timeout:
write_message('Timeout during tarball extraction on %s' % (tarball,))
return
if tex_files == [] or tex_files == None:
write_message('%s is not a tarball' % (os.path.split(tarball)[-1],))
run_shell_command('rm -r %s', (sub_dir,))
return
converted_image_list = convert_images(image_list)
write_message('converted %d of %d images found for %s' % (len(converted_image_list), \
len(image_list), \
os.path.basename(tarball)))
extracted_image_data = []
for tex_file in tex_files:
# Extract images, captions and labels
partly_extracted_image_data = extract_captions(tex_file, sub_dir, \
converted_image_list)
if partly_extracted_image_data != []:
# Add proper filepaths and do various cleaning
cleaned_image_data = prepare_image_data(partly_extracted_image_data, \
tex_file, converted_image_list)
# Using prev. extracted info, get contexts for each image found
extracted_image_data.extend((extract_context(tex_file, cleaned_image_data)))
extracted_image_data = remove_dups(extracted_image_data)
if extracted_image_data == []:
write_message('No plots detected in %s' % (refno,))
else:
if refno_url == "":
refno = None
create_contextfiles(extracted_image_data)
marc_xml = create_MARC(extracted_image_data, tarball, refno)
if not squash:
marc_xml += "\n</collection>"
if marc_name != None:
marc_fd = open(marc_name, 'a')
marc_fd.write('%s\n' % (marc_xml,))
marc_fd.close()
if not squash:
write_message('generated %s' % (marc_name,))
if upload_plots:
upload_to_site(marc_name, yes_i_know, upload_mode)
if clean:
clean_up(extracted_files_list, image_list)
write_message('work complete on %s' % (os.path.split(tarball)[-1],))
return marc_name
def clean_up(extracted_files_list, image_list):
"""
Removes all the intermediate stuff.
@param: extracted_files_list ([string, string, ...]): list of all extracted files
@param: image_list ([string, string, ...]): list of the images to keep
"""
for extracted_file in extracted_files_list:
# Remove everything that is not in the image_list or is not a directory
if extracted_file not in image_list and extracted_file[-1] != os.sep:
run_shell_command('rm %s', (extracted_file,))
def get_defaults(tarball, sdir, refno_url, recid=None):
"""
A function for parameter-checking.
@param: tarball (string): the location of the tarball to be extracted
@param: sdir (string): the location of the scratch directory for untarring,
conversions, and the ultimate destination of the MARCXML
@param: refno_url (string): server location on where to look for refno
@param recid: (int) if set, overrides C{refno_url} and consider this record
@return sdir, refno (string, string): the same
arguments it was sent as is appropriate.
"""
if not sdir or recid:
        # Missing sdir (or recid given): use the default directory CFG_TMPSHAREDDIR
sdir = CFG_TMPSHAREDDIR
else:
sdir = os.path.split(tarball)[0]
# make a subdir in the scratch directory for each tarball
sdir = make_single_directory(sdir, \
os.path.split(tarball)[-1] + '_' + PLOTS_DIR)
if recid:
refno = str(recid)
elif refno_url != "":
refno = get_reference_number(tarball, refno_url)
if refno == None:
refno = os.path.basename(tarball)
write_message('Error: can\'t find record id for %s' % (refno,))
else:
refno = os.path.basename(tarball)
write_message("Skipping ref-no check")
return sdir, refno
def get_reference_number(tarball, refno_url):
"""
Attempts to determine the reference number of the file by searching.
@param: tarball (string): the name of the tarball as downloaded from
arXiv
@param: refno_url (string): url of repository to check for a
reference number for this record. If not set; returns None
@return: refno (string): the reference number of the paper
"""
if refno_url:
server = InvenioConnector(refno_url)
# we just need the name of the file
tarball = os.path.split(tarball)[1]
prefix = '037__a:'
# the name right now looks like arXiv:hep-ph_9703009
# or arXiv:0910.0476
if tarball.startswith(ARXIV_HEADER):
if len(tarball.split('_')) > 1:
tarball = tarball.split(':')[1]
arXiv_record = tarball.replace('_', '/')
else:
arXiv_record = tarball
result = server.search(p=prefix + arXiv_record, of='id')
if len(result) == 0:
return None
return str(result[0])
arXiv_record = re.findall('(([a-zA-Z\\-]+/\\d+)|(\\d+\\.\\d+))', tarball)
        if arXiv_record:
            # re.findall with groups returns tuples; element 0 is the full match
            arXiv_record = arXiv_record[0][0]
result = server.search(p=prefix + arXiv_record, of='id')
if len(result) > 0:
return str(result[0])
tarball_mod = tarball.replace('_', '/')
arXiv_record = re.findall('(([a-zA-Z\\-]+/\\d+)|(\\d+\\.\\d+))', \
tarball_mod)
        if arXiv_record:
            # re.findall with groups returns tuples; element 0 is the full match
            arXiv_record = arXiv_record[0][0]
result = server.search(p=prefix + arXiv_record, of='id')
if len(result) > 0:
return str(result[0])
return None
def rotate_image(filename, line, sdir, image_list):
"""
Given a filename and a line, figure out what it is that the author
wanted to do wrt changing the rotation of the image and convert the
file so that this rotation is reflected in its presentation.
@param: filename (string): the name of the file as specified in the TeX
@param: line (string): the line where the rotate command was found
@output: the image file rotated in accordance with the rotate command
@return: True if something was rotated
"""
file_loc = get_image_location(filename, sdir, image_list)
degrees = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
if len(degrees) < 1:
return False
degrees = degrees[0].split('=')[-1].strip()
if file_loc == None or file_loc == 'ERROR' or\
not re.match('-*\\d+', degrees):
return False
degrees = str(0 - int(degrees))
cmd_list = ['mogrify', '-rotate', degrees, file_loc]
dummy, dummy, cmd_err = run_process_with_timeout(cmd_list)
    if cmd_err != '':
        # mogrify reported an error, so nothing was rotated
        return False
    else:
        return True
def get_context(lines, backwards=False):
"""
Given a relevant string from a TeX file, this function will extract text
from it as far as it is deemed contextually relevant, either backwards or forwards
in the text. The level of relevance allowed is configurable. When it reaches some
point in the text that is determined to be out of scope from the current context,
like text that is identified as a new paragraph, a complex TeX structure
('/begin', '/end', etc.) etc., it will return the previously allocated text.
    For use when extracting text with contextual value for a figure or plot.
@param lines (string): string to examine
    @param backwards (bool): are we searching backwards?
@return context (string): extracted context
"""
tex_tag = re.compile(r".*\\(\w+).*")
sentence = re.compile(r"(?<=[.?!])[\s]+(?=[A-Z])")
context = []
word_list = lines.split()
if backwards:
word_list.reverse()
# For each word we do the following:
# 1. Check if we have reached word limit
# 2. If not, see if this is a TeX tag and see if its 'illegal'
# 3. Otherwise, add word to context
for word in word_list:
if len(context) >= CFG_PLOTEXTRACTOR_CONTEXT_WORD_LIMIT:
break
match = tex_tag.match(word)
if (match and match.group(1) in CFG_PLOTEXTRACTOR_DISALLOWED_TEX):
# TeX Construct matched, return
if backwards:
# When reversed we need to go back and
# remove unwanted data within brackets
temp_word = ""
while len(context):
temp_word = context.pop()
if '}' in temp_word:
break
break
context.append(word)
if backwards:
context.reverse()
text = " ".join(context)
sentence_list = sentence.split(text)
if backwards:
sentence_list.reverse()
if len(sentence_list) > CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT:
return " ".join(sentence_list[:CFG_PLOTEXTRACTOR_CONTEXT_SENTENCE_LIMIT])
else:
return " ".join(sentence_list)
def extract_context(tex_file, extracted_image_data):
"""
Given a .tex file and a label name, this function will extract the text before
and after for all the references made to this label in the text. The number
of characters to extract before and after is configurable.
@param tex_file (list): path to .tex file
@param extracted_image_data ([(string, string, list), ...]):
a list of tuples of images matched to labels and captions from
this document.
@return extracted_image_data ([(string, string, list, list),
(string, string, list, list),...)]: the same list, but now containing
extracted contexts
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
fd = open(tex_file)
lines = fd.read()
fd.close()
# Generate context for each image and its assoc. labels
new_image_data = []
for image, caption, label in extracted_image_data:
context_list = []
# Generate a list of index tuples for all matches
indicies = [match.span() \
for match in re.finditer(r"(\\(?:fig|ref)\{%s\})" % (re.escape(label),), \
lines)]
for startindex, endindex in indicies:
            # Retrieve all lines before label until beginning of file
i = startindex - CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
if i < 0:
text_before = lines[:startindex]
else:
text_before = lines[i:startindex]
context_before = get_context(text_before, backwards=True)
            # Retrieve all lines from label until end of file and get context
i = endindex + CFG_PLOTEXTRACTOR_CONTEXT_EXTRACT_LIMIT
text_after = lines[endindex:i]
context_after = get_context(text_after)
context_list.append(context_before + ' \\ref{' + label + '} ' + context_after)
new_image_data.append((image, caption, label, context_list))
return new_image_data
def extract_captions(tex_file, sdir, image_list, primary=True):
"""
Take the TeX file and the list of images in the tarball (which all,
presumably, are used in the TeX file) and figure out which captions
in the text are associated with which images
    @param: tex_file (string): the name of the TeX file which mentions
            the images
@param: sdir (string): path to current sub-directory
@param: image_list (list): list of images in tarball
@param: primary (bool): is this the primary call to extract_caption?
@return: images_and_captions_and_labels ([(string, string, list),
(string, string, list), ...]):
a list of tuples representing the names of images and their
corresponding figure labels from the TeX file
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
fd = open(tex_file)
lines = fd.readlines()
fd.close()
# possible figure lead-ins
figure_head = '\\begin{figure' # also matches figure*
figure_tail = '\\end{figure' # also matches figure*
picture_head = '\\begin{picture}'
displaymath_head = '\\begin{displaymath}'
subfloat_head = '\\subfloat'
subfig_head = '\\subfigure'
includegraphics_head = '\\includegraphics'
epsfig_head = '\\epsfig'
input_head = '\\input'
# possible caption lead-ins
caption_head = '\\caption'
figcaption_head = '\\figcaption'
label_head = '\\label'
rotate = 'rotate='
angle = 'angle='
eps_tail = '.eps'
ps_tail = '.ps'
doc_head = '\\begin{document}'
doc_tail = '\\end{document}'
extracted_image_data = []
cur_image = ''
caption = ''
labels = []
active_label = ""
    # drop everything that comes before the document head
if primary:
for line_index in range(len(lines)):
if lines[line_index].find(doc_head) < 0:
lines[line_index] = ''
else:
break
# are we using commas in filenames here?
commas_okay = False
for dummy1, dummy2, filenames in \
os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
for filename in filenames:
if filename.find(',') > -1:
commas_okay = True
break
# a comment is a % not preceded by a \
comment = re.compile("(?<!\\\\)%")
for line_index in range(len(lines)):
# get rid of pesky comments by splitting where the comment is
# and keeping only the part before the %
line = comment.split(lines[line_index])[0]
line = line.strip()
lines[line_index] = line
in_figure_tag = 0
for line_index in range(len(lines)):
line = lines[line_index]
if line == '':
continue
if line.find(doc_tail) > -1:
return extracted_image_data
"""
FIGURE -
structure of a figure:
\begin{figure}
\formatting...
\includegraphics[someoptions]{FILENAME}
\caption{CAPTION} %caption and includegraphics may be switched!
\end{figure}
"""
index = line.find(figure_head)
if index > -1:
in_figure_tag = 1
            # some authors leave images or captions outside the figure tag,
            # so flush anything that is still pending when a new figure
            # starts
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label, extracted_image_data, \
line_index, lines)
        # inclusion commands come in several flavours, so check for all of
        # the usual extensions and macros
index = max([line.find(eps_tail), line.find(ps_tail), \
line.find(epsfig_head)])
if index > -1:
if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
ext = True
else:
ext = False
filenames = intelligently_find_filenames(line, ext=ext,
commas_okay=commas_okay)
# try to look ahead! sometimes there are better matches after
if line_index < len(lines) - 1:
filenames.extend(\
intelligently_find_filenames(lines[line_index + 1],
commas_okay=commas_okay))
if line_index < len(lines) - 2:
filenames.extend(\
intelligently_find_filenames(lines[line_index + 2],
commas_okay=commas_okay))
for filename in filenames:
filename = str(filename)
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
Rotate and angle
"""
index = max(line.find(rotate), line.find(angle))
if index > -1:
# which is the image associated to it?
filenames = intelligently_find_filenames(line,
commas_okay=commas_okay)
# try the line after and the line before
if line_index + 1 < len(lines):
filenames.extend(intelligently_find_filenames(lines[line_index + 1],
commas_okay=commas_okay))
if line_index > 1:
filenames.extend(intelligently_find_filenames(lines[line_index - 1],
commas_okay=commas_okay))
already_tried = []
for filename in filenames:
if filename != 'ERROR' and not filename in already_tried:
if rotate_image(filename, line, sdir, image_list):
break
already_tried.append(filename)
"""
INCLUDEGRAPHICS -
structure of includegraphics:
\includegraphics[someoptions]{FILENAME}
"""
index = line.find(includegraphics_head)
if index > -1:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, index, '{', lines)
filename = lines[open_curly_line][open_curly + 1:close_curly]
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
{\input{FILENAME}}
\caption{CAPTION}
This input is ambiguous, since input is also used for things like
inclusion of data from other LaTeX files directly.
"""
index = line.find(input_head)
if index > -1:
new_tex_names = intelligently_find_filenames(line, TeX=True, \
commas_okay=commas_okay)
for new_tex_name in new_tex_names:
if new_tex_name != 'ERROR':
new_tex_file = get_tex_location(new_tex_name, tex_file)
if new_tex_file != None and primary: #to kill recursion
extracted_image_data.extend(extract_captions(\
new_tex_file, sdir, \
image_list,
primary=False))
"""PICTURE"""
index = line.find(picture_head)
if index > -1:
# structure of a picture:
# \begin{picture}
# ....not worrying about this now
#write_message('found picture tag')
#FIXME
pass
"""DISPLAYMATH"""
index = line.find(displaymath_head)
if index > -1:
# structure of a displaymath:
# \begin{displaymath}
# ....not worrying about this now
#write_message('found displaymath tag')
#FIXME
pass
"""
CAPTIONS -
structure of a caption:
\caption[someoptions]{CAPTION}
or
\caption{CAPTION}
or
\caption{{options}{CAPTION}}
"""
index = max([line.find(caption_head), line.find(figcaption_head)])
if index > -1:
open_curly, open_curly_line, close_curly, close_curly_line = \
find_open_and_close_braces(line_index, index, '{', lines)
cap_begin = open_curly + 1
cur_caption = assemble_caption(open_curly_line, cap_begin, \
close_curly_line, close_curly, lines)
if caption == '':
caption = cur_caption
elif type(caption) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
else:
caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
elif caption != cur_caption:
caption = ['', [caption, cur_caption]]
"""
SUBFLOATS -
structure of a subfloat (inside of a figure tag):
\subfloat[CAPTION]{options{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfloat_head)
if index > -1:
# if we are dealing with subfloats, we need a different
# sort of structure to keep track of captions and subcaptions
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line, \
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(close_square_line, \
close_square, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
SUBFIGURES -
structure of a subfigure (inside a figure tag):
\subfigure[CAPTION]{
\includegraphics[options]{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfig_head)
if index > -1:
            # like with subfloats, we need a different structure for keeping
            # track of this stuff
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line, \
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            index_cpy = index
            line_index_cpy = line_index
# find the graphics tag to get the filename
# it is okay if we eat lines here
index = line.find(includegraphics_head)
while index == -1 and (line_index + 1) < len(lines):
line_index = line_index + 1
line = lines[line_index]
index = line.find(includegraphics_head)
            if index == -1:
                # didn't find the includegraphics tag; restore our position
                line_index = line_index_cpy
                index = index_cpy
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, \
index, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
LABELS -
structure of a label:
\label{somelabelnamewhichprobablyincludesacolon}
Labels are used to tag images and will later be used in ref tags
to reference them. This is interesting because in effect the refs
to a plot are additional caption for it.
Notes: labels can be used for many more things than just plots.
We'll have to experiment with how to best associate a label with an
image.. if it's in the caption, it's easy. If it's in a figure, it's
still okay... but the images that aren't in figure tags are numerous.
"""
index = line.find(label_head)
if index > -1 and in_figure_tag:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, \
index, '{', lines)
label = lines[open_curly_line][open_curly + 1:close_curly]
if label not in labels:
active_label = label
labels.append(label)
"""
FIGURE
important: we put the check for the end of the figure at the end
of the loop in case some pathological person puts everything in one
line
"""
index = max([line.find(figure_tail), line.find(doc_tail)])
if index > -1:
in_figure_tag = 0
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label, extracted_image_data, \
line_index, lines)
"""
END DOCUMENT
we shouldn't look at anything after the end document tag is found
"""
index = line.find(doc_tail)
if index > -1:
break
return extracted_image_data
def put_it_together(cur_image, caption, context, extracted_image_data, line_index, \
lines):
"""
Takes the current image(s) and caption(s) and assembles them into
something useful in the extracted_image_data list.
@param: cur_image (string || list): the image currently being dealt with, or
the list of images, in the case of subimages
    @param: caption (string || list): the caption or captions currently in scope
    @param: context (string): the active label attached to each extracted
        (image, caption) pair
    @param: extracted_image_data ([(string, string, string), ...]):
        a list of (image, caption, context) triples matched so far in this
        document.
@param: line_index (int): the index where we are in the lines (for
searchback and searchforward purposes)
@param: lines ([string, string, ...]): the lines in the TeX
@return: (cur_image, caption, extracted_image_data): the same arguments it
was sent, processed appropriately
"""
if type(cur_image) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] == 'ERROR':
cur_image[MAIN_CAPTION_OR_IMAGE] = ''
for image in cur_image[SUB_CAPTION_OR_IMAGE]:
if image == 'ERROR':
cur_image[SUB_CAPTION_OR_IMAGE].remove(image)
if cur_image != '' and caption != '':
if type(cur_image) == list and type(caption) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] != '' and\
caption[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE],
caption[MAIN_CAPTION_OR_IMAGE],
context))
if type(cur_image[MAIN_CAPTION_OR_IMAGE]) == list:
# why is the main image a list?
# it's a good idea to attach the main caption to other
# things, but the main image can only be used once
cur_image[MAIN_CAPTION_OR_IMAGE] = ''
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
for index in \
range(len(cur_image[SUB_CAPTION_OR_IMAGE])):
if index < len(caption[SUB_CAPTION_OR_IMAGE]):
long_caption = \
caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
caption[SUB_CAPTION_OR_IMAGE][index]
else:
long_caption = \
caption[MAIN_CAPTION_OR_IMAGE] + ' : ' + \
'Caption not extracted'
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE][index],
long_caption, context))
else:
long_caption = caption[MAIN_CAPTION_OR_IMAGE] + \
' : ' + caption[SUB_CAPTION_OR_IMAGE]
for sub_image in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append(
(sub_image, long_caption, context))
else:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for sub_cap in caption[SUB_CAPTION_OR_IMAGE]:
long_caption = long_caption + ' : ' + sub_cap
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE], long_caption, context))
else:
                    # neither sub-entry is a list here; pair them directly
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE],
caption[SUB_CAPTION_OR_IMAGE], context))
elif type(cur_image) == list:
if cur_image[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
for image in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append((image, caption, context))
else:
extracted_image_data.append(
(cur_image[SUB_CAPTION_OR_IMAGE], caption, context))
elif type(caption) == list:
if caption[MAIN_CAPTION_OR_IMAGE] != '':
extracted_image_data.append(
(cur_image, caption[MAIN_CAPTION_OR_IMAGE], context))
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
# multiple caps for one image:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for subcap in caption[SUB_CAPTION_OR_IMAGE]:
if long_caption != '':
long_caption += ' : '
long_caption += subcap
extracted_image_data.append((cur_image, long_caption, context))
else:
extracted_image_data.append(
                    (cur_image, caption[SUB_CAPTION_OR_IMAGE], context))
else:
extracted_image_data.append((cur_image, caption, context))
elif cur_image != '' and caption == '':
# we may have missed the caption somewhere.
REASONABLE_SEARCHBACK = 25
REASONABLE_SEARCHFORWARD = 5
curly_no_tag_preceding = '(?<!\\w){'
for searchback in range(REASONABLE_SEARCHBACK):
if line_index - searchback < 0:
continue
back_line = lines[line_index - searchback]
m = re.search(curly_no_tag_preceding, back_line)
if m != None:
open_curly = m.start()
open_curly, open_curly_line, close_curly, \
close_curly_line = find_open_and_close_braces(\
line_index - searchback, open_curly, '{', lines)
cap_begin = open_curly + 1
caption = assemble_caption(open_curly_line, cap_begin, \
close_curly_line, close_curly, lines)
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append((sub_img, caption, context))
else:
extracted_image_data.append((cur_image, caption, context))
break
if caption == '':
for searchforward in range(REASONABLE_SEARCHFORWARD):
if line_index + searchforward >= len(lines):
break
fwd_line = lines[line_index + searchforward]
m = re.search(curly_no_tag_preceding, fwd_line)
if m != None:
open_curly = m.start()
open_curly, open_curly_line, close_curly, \
close_curly_line = find_open_and_close_braces(\
line_index + searchforward, open_curly, '{', lines)
cap_begin = open_curly + 1
caption = assemble_caption(open_curly_line, \
cap_begin, close_curly_line, close_curly, lines)
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], caption, context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
extracted_image_data.append((sub_img, caption, context))
else:
extracted_image_data.append((cur_image, caption, context))
break
if caption == '':
if type(cur_image) == list:
extracted_image_data.append(
(cur_image[MAIN_CAPTION_OR_IMAGE], 'No caption found', context))
for sub_img in cur_image[SUB_CAPTION_OR_IMAGE]:
                extracted_image_data.append((sub_img, 'No caption found', context))
else:
extracted_image_data.append(
(cur_image, 'No caption found', context))
elif caption != '' and cur_image == '':
if type(caption) == list:
long_caption = caption[MAIN_CAPTION_OR_IMAGE]
for subcap in caption[SUB_CAPTION_OR_IMAGE]:
long_caption = long_caption + ': ' + subcap
else:
long_caption = caption
extracted_image_data.append(('', 'noimg' + long_caption, context))
# if we're leaving the figure, no sense keeping the data
cur_image = ''
caption = ''
return (cur_image, caption, extracted_image_data)
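# Illustrative sketch (not part of the original module): the caption
# search-back/search-forward above keys on a '{' that is not preceded by a
# word character, so '\includegraphics{...}' is skipped while a bare caption
# group matches. The sample lines below are invented.
def _demo_curly_no_tag_preceding():
    curly_no_tag_preceding = '(?<!\\w){'
    tagged = re.search(curly_no_tag_preceding, '\\includegraphics{fig1.eps}')
    bare = re.search(curly_no_tag_preceding, '{A free-standing caption}')
    return tagged, bare  # (None, match object starting at column 0)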
def intelligently_find_filenames(line, TeX=False, ext=False, commas_okay=False):
"""
Find the filename in the line. We don't support all filenames! Just eps
and ps for now.
@param: line (string): the line we want to get a filename out of
@return: filename ([string, ...]): what is probably the name of the file(s)
"""
files_included = ['ERROR']
if commas_okay:
valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
else:
valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'
if ext:
valid_for_filename = valid_for_filename + '\.e*ps[texfi2]*'
if TeX:
valid_for_filename = valid_for_filename + '[\.latex]*'
file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
if len(file_inclusion) > 0:
# right now it looks like '=FILENAME,' or '=FILENAME '
for file_included in file_inclusion:
files_included.append(file_included[1:-1])
file_inclusion = re.findall('(?:[ps]*file=|figure=)' + \
valid_for_filename + '[,\\]} ]*', line)
if len(file_inclusion) > 0:
# still has the =
for file_included in file_inclusion:
part_before_equals = file_included.split('=')[0]
if len(part_before_equals) != len(file_included):
file_included = file_included[len(part_before_equals) + 1:].strip()
if not file_included in files_included:
files_included.append(file_included)
file_inclusion = re.findall('["\'{\\[]' + valid_for_filename + '[}\\],"\']', \
line)
if len(file_inclusion) > 0:
# right now it's got the {} or [] or "" or '' around it still
for file_included in file_inclusion:
file_included = file_included[1:-1]
file_included = file_included.strip()
if not file_included in files_included:
files_included.append(file_included)
file_inclusion = re.findall('^' + valid_for_filename + '$', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if not file_included in files_included:
files_included.append(file_included)
file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if not file_included in files_included:
files_included.append(file_included)
file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)
if len(file_inclusion) > 0:
for file_included in file_inclusion:
file_included = file_included.strip()
if not file_included in files_included:
files_included.append(file_included)
if files_included != ['ERROR']:
files_included = files_included[1:] # cut off the dummy
for file_included in files_included[:]: # iterate over a copy; the list is mutated below
    if file_included == '':
        files_included.remove(file_included)
    if ' ' in file_included:
        for subfile in file_included.split(' '):
            if not subfile in files_included:
                files_included.append(subfile)
    if ',' in file_included:
        for subfile in file_included.split(','):
            if not subfile in files_included:
                files_included.append(subfile)
return files_included
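# Hypothetical usage sketch (not in the original source); the sample lines
# are invented and only the call signature comes from the function above.
# Returned lists may still carry stray separators that callers clean up.
def _demo_intelligently_find_filenames():
    # 'file=' style inclusions are caught by the '(?:[ps]*file=|figure=)' pattern
    from_epsfig = intelligently_find_filenames(
        '\\epsfig{file=figs/detector.eps,width=0.5\\textwidth}', ext=True)
    # a filename standing alone on a line is caught by the '^...$' pattern
    from_bare_line = intelligently_find_filenames('figs/detector.eps')
    return from_epsfig, from_bare_line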
def upload_to_site(marcxml, yes_i_know, upload_mode="append"):
"""
makes the appropriate calls to bibupload to get the MARCXML record onto
the site. Uploads in "correct" mode.
@param: marcxml (string): the absolute location of the MARCXML that was
generated by this programme
@param: yes_i_know (boolean): if true, no confirmation. if false, prompt.
@output: a new record on the invenio site
@return: None
"""
if not yes_i_know:
wait_for_user(wrap_text_in_a_box('You are going to upload new ' + \
'plots to the server.'))
task_low_level_submission('bibupload', 'admin', upload_mode and '--' + upload_mode or '', marcxml)
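# Note on the 'and/or' idiom above (pre-dating 'x if cond else y'):
# upload_mode and '--' + upload_mode or '' yields '--append' for the default
# mode and '' when upload_mode is empty or None. A minimal illustration:
def _demo_upload_mode_flag(upload_mode='append'):
    return upload_mode and '--' + upload_mode or ''  # '--append' / ''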
help_string = """
name: plotextractor
usage:
python plotextractor.py -d tar/dir -s scratch/dir
python plotextractor.py -i inputfile -u
python plotextractor.py --arXiv=arXiv_id
python plotextractor.py --recid=recids
example:
python plotextractor.py -d /some/path/with/tarballs
python plotextractor.py -i input.txt --no-sdir --extract-text
python plotextractor.py --arXiv=hep-ex/0101001
python plotextractor.py --recid=13-20,29
options:
-d, --tardir=
if you wish to do a batch of tarballs, search the tree
rooted at this directory for them
-s, --scratchdir=
the directory for scratchwork (untarring, conversion, etc.).
make sure that this directory is one of the allowed dirs in
CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS to avoid errors. with an
sdir selected, one xml file will be generated for the whole
batch of files processed, and it will live in this sdir.
-i, --input=
if you wish to give an input file for downloading files from
arXiv (or wherever), this is the pointer to that file, which
should contain urls to download, no more than 1 per line. each
line should be the url of a tarball or gzipped tarball, and
each downloaded item will then be processed.
-x, --extract-text
if there is a pdf with the same base name as the tarball for each
tarball this is being run on, running with the -x parameter will
run pdftotext on each of these pdfs and store the result in the
folder
-f, --force
if you want to overwrite everything that was done before, just
force the script to overwrite it. otherwise it will only run on
things that haven't been run on yet (for use with tardir).
-c, --clean
if you wish to do delete all non-essential files that were extracted.
-u, --call-bibupload, --yes-i-know
if you want to upload the plots, ask to call bibupload. appending
the --yes-i-know flag bypasses bibupload's prompt to upload
--upload-mode=
if you use --call-bibupload option, allows to specify in which
mode BibUpload should process the input. Can take values:
'insert', 'append', 'correct' or 'replace'
-l, --refno-url
Specify an URL to the invenio-instance to query for refno.
Defaults to CFG_SITE_URL.
-k, --skip-refno
allows you to skip any refno check
-r, --recid=
if you want to process the tarball of one recid, use this tag. it
will also accept ranges (i.e. --recid=13-20)
--with-docname=
allow to choose files to process on the basis of their docname,
when used with --recid option
--with-doctype=
allow to choose files to process on the basis of their doctype,
when used with --recid option
--with-docformat=
allow to choose files to process on the basis of their format,
when used with --recid option
-a, --arXiv=
if you want to process the tarball of one arXiv id, use this tag.
-t, --tarball=
for processing one tarball.
-q, --squash
if you want to squash all MARC into a single MARC file (for easier
and faster bibuploading)
-h, --help
Print this help and exit.
description: extracts plots from a tarfile from arXiv and generates
MARCXML that links figures and their captions. converts all
images to PNG format.
"""
def usage():
write_message(help_string)
if __name__ == '__main__':
main()
| gpl-2.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/lib_openshift/library/oc_version.py | 7 | 49326 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/version -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_version
short_description: Return the current openshift version
description:
- Return the openshift installed version. `oc version`
options:
state:
description:
- Currently list is only supported state.
required: true
default: list
choices: ["list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
oc_version:
- name: get oc version
oc_version:
register: oc_version
'''
# -*- -*- -*- End included fragment: doc/version -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
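# Illustrative sketch (not part of the generated module): how Yedit's
# separator-based key notation from the fragment above resolves nested
# structures. The sample document is invented.
def _demo_yedit_key_notation():
    yed = Yedit(content={'a': {'b': {'c': 'd'}}})
    value = yed.get('a.b.c')        # 'd': dicts traversed on '.'-separated keys
    yed.put('a.b.e', ['x', 'y'])    # create a nested list
    yed.put('a.b.e[1]', 'z')        # '[n]' indexes into existing lists
    return value, yed.yaml_dict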
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
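# Return-shape note (illustration, not an API guarantee): a successful JSON
# call typically comes back as
#   {'returncode': 0, 'cmd': '/usr/bin/oc get pods -o json', 'results': {...}}
# while failures additionally carry 'stdout' and 'stderr' (and sometimes 'err').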
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
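# Hypothetical sketch (not in the generated module): what the version helpers
# above produce for typical 'oc version' output. The sample text is invented.
def _demo_version_parsing():
    stdout = 'oc v3.6.173.0.59\nkubernetes v1.6.1+5115d708d7\n'
    filtered = Utils.filter_versions(stdout)
    # {'oc': 'v3.6.173.0.59', 'kubernetes': 'v1.6.1+5115d708d7',
    #  'openshift': 'v3.6.173.0.59'}   ('openshift' falls back to 'oc')
    custom = Utils.add_custom_versions(filtered)
    # adds e.g. 'oc_numeric': '3.6.173.0.59' and 'oc_short': '3.6'
    return filtered, custom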
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
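# Hypothetical sketch (not in the generated module): how stringify() renders
# the options hash as CLI flags; the option values below are invented.
def _demo_config_options():
    opts = {'node_selector': {'value': {'region': 'infra', 'zone': 'default'},
                              'include': True}}
    cfg = OpenShiftCLIConfig('router', 'default',
                             '/etc/origin/master/admin.kubeconfig', opts)
    return cfg.to_option_list(ascommalist='node_selector')
    # ['--node-selector=region=infra,zone=default']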
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
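# Illustrative invocation sketch (not part of the module): run_ansible expects
# a params dict mirroring the AnsibleModule argument_spec defined below. It
# only succeeds on a host where oc and the referenced kubeconfig exist.
def _demo_oc_version_params():
    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
              'state': 'list',
              'debug': False}
    return OCVersion.run_ansible(params)
    # {'state': 'list', 'results': {...}, 'changed': False} on success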
# -*- -*- -*- Begin included fragment: ansible/oc_version.py -*- -*- -*-
def main():
''' ansible oc module for version '''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='list', type='str',
choices=['list']),
debug=dict(default=False, type='bool'),
),
supports_check_mode=True,
)
rval = OCVersion.run_ansible(module.params)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_version.py -*- -*- -*-
| apache-2.0 |
ropik/chromium | third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_SPARC_asm.py | 33 | 9068 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_SPARC_asm.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")
def printRealHeader(self):
print '#include "glapi/glapioffsets.h"'
print ''
print '#ifdef __arch64__'
print '#define GL_OFF(N)\t((N) * 8)'
print '#define GL_LL\t\tldx'
print '#define GL_TIE_LD(SYM)\t%tie_ldx(SYM)'
print '#define GL_STACK_SIZE\t128'
print '#else'
print '#define GL_OFF(N)\t((N) * 4)'
print '#define GL_LL\t\tld'
print '#define GL_TIE_LD(SYM)\t%tie_ld(SYM)'
print '#define GL_STACK_SIZE\t64'
print '#endif'
print ''
print '#define GLOBL_FN(x) .globl x ; .type x, @function'
print '#define HIDDEN(x) .hidden x'
print ''
print '\t.register %g2, #scratch'
print '\t.register %g3, #scratch'
print ''
print '\t.text'
print ''
print '\tGLOBL_FN(__glapi_sparc_icache_flush)'
print '\tHIDDEN(__glapi_sparc_icache_flush)'
print '\t.type\t__glapi_sparc_icache_flush, @function'
print '__glapi_sparc_icache_flush: /* %o0 = insn_addr */'
print '\tflush\t%o0'
print '\tretl'
print '\t nop'
print ''
print '\t.align\t32'
print ''
print '\t.type\t__glapi_sparc_get_pc, @function'
print '__glapi_sparc_get_pc:'
print '\tretl'
print '\t add\t%o7, %g2, %g2'
print '\t.size\t__glapi_sparc_get_pc, .-__glapi_sparc_get_pc'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '\tGLOBL_FN(__glapi_sparc_get_dispatch)'
print '\tHIDDEN(__glapi_sparc_get_dispatch)'
print '__glapi_sparc_get_dispatch:'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
print '\tretl'
print '\t mov\t%g2, %o0'
print ''
print '\t.data'
print '\t.align\t32'
print ''
print '\t/* --> sethi %hi(_glapi_tls_Dispatch), %g1 */'
print '\t/* --> or %g1, %lo(_glapi_tls_Dispatch), %g1 */'
print '\tGLOBL_FN(__glapi_sparc_tls_stub)'
print '\tHIDDEN(__glapi_sparc_tls_stub)'
print '__glapi_sparc_tls_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\tadd\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsrl\t%g3, 10, %g3'
print '\tsethi\t%tie_hi22(_glapi_tls_Dispatch), %g1'
print '\tadd\t%g1, %tie_lo10(_glapi_tls_Dispatch), %g1'
print '\tGL_LL\t[%g2 + %g1], %g2, GL_TIE_LD(_glapi_tls_Dispatch)'
print '\tGL_LL\t[%g7+%g2], %g1'
print '\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '\t.size\t__glapi_sparc_tls_stub, .-__glapi_sparc_tls_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_tls_stub;\t\t\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#elif defined(PTHREADS)'
print ''
print '\t/* 64-bit 0x00 --> sethi %hh(_glapi_Dispatch), %g1 */'
print '\t/* 64-bit 0x04 --> sethi %lm(_glapi_Dispatch), %g2 */'
print '\t/* 64-bit 0x08 --> or %g1, %hm(_glapi_Dispatch), %g1 */'
print '\t/* 64-bit 0x0c --> sllx %g1, 32, %g1 */'
print '\t/* 64-bit 0x10 --> add %g1, %g2, %g1 */'
print '\t/* 64-bit 0x14 --> ldx [%g1 + %lo(_glapi_Dispatch)], %g1 */'
print ''
print '\t/* 32-bit 0x00 --> sethi %hi(_glapi_Dispatch), %g1 */'
print '\t/* 32-bit 0x04 --> ld [%g1 + %lo(_glapi_Dispatch)], %g1 */'
print ''
print '\t.data'
print '\t.align\t32'
print ''
print '\tGLOBL_FN(__glapi_sparc_pthread_stub)'
print '\tHIDDEN(__glapi_sparc_pthread_stub)'
print '__glapi_sparc_pthread_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsethi\t%hi(_glapi_Dispatch), %g1'
print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
print '\tsrl\t%g3, 10, %g3'
print '\tGL_LL\t[%g2+%g1], %g2'
print '\tGL_LL\t[%g2], %g1'
print '\tcmp\t%g1, 0'
print '\tbe\t2f'
print '\t nop'
print '1:\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '2:\tsave\t%sp, GL_STACK_SIZE, %sp'
print '\tmov\t%g3, %l0'
print '\tcall\t_glapi_get_dispatch'
print '\t nop'
print '\tmov\t%o0, %g1'
print '\tmov\t%l0, %g3'
print '\tba\t1b'
print '\t restore %g0, %g0, %g0'
print '\t.size\t__glapi_sparc_pthread_stub, .-__glapi_sparc_pthread_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_pthread_stub;\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#else /* Non-threaded version. */'
print ''
print '\t.type __glapi_sparc_nothread_stub, @function'
print '__glapi_sparc_nothread_stub: /* Call offset in %g3 */'
print '\tmov\t%o7, %g1'
print '\tsethi\t%hi(_GLOBAL_OFFSET_TABLE_-4), %g2'
print '\tcall\t__glapi_sparc_get_pc'
print '\t add\t%g2, %lo(_GLOBAL_OFFSET_TABLE_+4), %g2'
print '\tmov\t%g1, %o7'
print '\tsrl\t%g3, 10, %g3'
print '\tsethi\t%hi(_glapi_Dispatch), %g1'
print '\tor\t%g1, %lo(_glapi_Dispatch), %g1'
print '\tGL_LL\t[%g2+%g1], %g2'
print '\tGL_LL\t[%g2], %g1'
print '\tGL_LL\t[%g1 + %g3], %g1'
print '\tjmp\t%g1'
print '\t nop'
print '\t.size\t__glapi_sparc_nothread_stub, .-__glapi_sparc_nothread_stub'
print ''
print '#define GL_STUB(fn, off)\t\t\t\\'
print '\tGLOBL_FN(fn);\t\t\t\t\\'
print 'fn:\tba\t__glapi_sparc_nothread_stub;\t\\'
print '\t sethi\tGL_OFF(off), %g3;\t\t\\'
print '\t.size\tfn,.-fn;'
print ''
print '#endif'
print ''
print '#define GL_STUB_ALIAS(fn, alias) \\'
print ' .globl fn; \\'
print ' .set fn, alias'
print ''
print '\t.text'
print '\t.align\t32'
print ''
print '\t.globl\tgl_dispatch_functions_start'
print '\tHIDDEN(gl_dispatch_functions_start)'
print 'gl_dispatch_functions_start:'
print ''
return
def printRealFooter(self):
print ''
print '\t.globl\tgl_dispatch_functions_end'
print '\tHIDDEN(gl_dispatch_functions_end)'
print 'gl_dispatch_functions_end:'
return
def printBody(self, api):
for f in api.functionIterateByOffset():
name = f.dispatch_name()
print '\tGL_STUB(gl%s, _gloffset_%s)' % (name, f.name)
if not f.is_static_entry_point(f.name):
print '\tHIDDEN(gl%s)' % (name)
for f in api.functionIterateByOffset():
name = f.dispatch_name()
if f.is_static_entry_point(f.name):
for n in f.entry_points:
if n != f.name:
text = '\tGL_STUB_ALIAS(gl%s, gl%s)' % (n, f.name)
if f.has_different_protocol(n):
print '#ifndef GLX_INDIRECT_RENDERING'
print text
print '#endif'
else:
print text
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
sys.exit(1)
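# Illustration (not emitted by this script): for an API entry point "Foo" at
# dispatch offset 42, printBody() above prints
#   GL_STUB(glFoo, _gloffset_Foo)
# and the GL_STUB macro selected in printRealHeader() expands that into a
# branch to the per-threading-model dispatch stub with the table offset
# preloaded in %g3.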
if __name__ == '__main__':
file_name = "gl_API.xml"
mode = "generic"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
except Exception, e:
show_usage()
for (arg,val) in args:
if arg == '-m':
mode = val
elif arg == "-f":
file_name = val
if mode == "generic":
printer = PrintGenericStubs()
else:
print "ERROR: Invalid mode \"%s\" specified." % mode
show_usage()
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer.Print(api)
| bsd-3-clause |
ShiYw/Sigil | 3rdparty/python/Lib/lib2to3/fixes/fix_throw.py | 203 | 1582 | """Fixer for generator.throw(E, V, T).
g.throw(E) -> g.throw(E)
g.throw(E, V) -> g.throw(E(V))
g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
g.throw("foo"[, V[, T]]) will warn about string exceptions."""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
class FixThrow(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
>
|
power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Leave "g.throw(E)" alone
val = results.get("val")
if val is None:
return
val = val.clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
| gpl-3.0 |
davidbliu/maestro-nng | maestro/__main__.py | 1 | 4482 | #!/usr/bin/env python
# Copyright (C) 2013 SignalFuse, Inc.
#
# Docker container orchestration utility.
import argparse
import jinja2
import logging
import sys
import os
import yaml
from . import exceptions, maestro
from . import name, version
# Define the commands
ACCEPTED_COMMANDS = ['status', 'fullstatus', 'start', 'stop', 'restart',
'logs']
DEFAULT_MAESTRO_FILE = 'maestro.yaml'
def load_config(options):
"""Preprocess the input config file through Jinja2 before loading it as
JSON."""
if options.file == '-':
template = jinja2.Template(sys.stdin.read())
else:
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(options.file)),
extensions=['jinja2.ext.with_'])
template = env.get_template(os.path.basename(options.file))
return yaml.load(template.render(env=os.environ))
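# Hypothetical sketch (not shipped with maestro): load_config() runs the file
# through Jinja2 with the process environment exposed as 'env', so templates
# can interpolate variables before YAML parsing. The inline template below is
# invented for illustration.
def _demo_inline_template():
    template = jinja2.Template(
        "registry: {{ env.REGISTRY|default('registry.local') }}")
    return yaml.load(template.render(env=os.environ))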
def create_parser():
parser = argparse.ArgumentParser(prog=name, description=(
'{} v{}, Docker container orchestrator.'.format(
name.title(), version)))
parser.add_argument('command', nargs='?',
choices=ACCEPTED_COMMANDS,
default='status',
help='orchestration command to execute')
parser.add_argument('things', nargs='*', metavar='thing',
help='container(s) or service(s) to act on')
parser.add_argument('-f', '--file', nargs='?', metavar='FILE',
default=DEFAULT_MAESTRO_FILE,
help=('read environment description from FILE ' +
'(use - for stdin)'))
parser.add_argument('-c', '--completion', metavar='CMD',
help=('list commands, services or containers in ' +
'environment based on CMD'))
parser.add_argument('-r', '--refresh-images', action='store_const',
const=True, default=False,
help='force refresh of container images from registry')
parser.add_argument('-F', '--follow', action='store_const',
const=True, default=False,
help='follow logs as they are generated')
parser.add_argument('-n', metavar='LINES', default=15,
help='Only show the last LINES lines for logs')
parser.add_argument('-o', '--only', action='store_const',
const=True, default=False,
help='only affect the selected container or service')
parser.add_argument('-v', '--version', action='store_const',
const=True, default=False,
help='show program version and exit')
return parser
def main(args=None):
options = create_parser().parse_args(args)
# If the version is requested, show it and exit right away.
if options.version:
print('{}-{}'.format(name, version))
return 0
try:
config = load_config(options)
except jinja2.exceptions.TemplateNotFound:
logging.error('Environment description file %s not found!',
options.file)
sys.exit(1)
except:
logging.error('Error reading environment description file %s!',
options.file)
sys.exit(1)
# Shutup urllib3, wherever it comes from.
(logging.getLogger('requests.packages.urllib3.connectionpool')
.setLevel(logging.WARN))
(logging.getLogger('urllib3.connectionpool')
.setLevel(logging.WARN))
c = maestro.Conductor(config)
if options.completion is not None:
args = filter(lambda x: not x.startswith('-'),
options.completion.split(' '))
if len(args) == 2:
prefix = args[1]
choices = ACCEPTED_COMMANDS
elif len(args) >= 3:
prefix = args[len(args)-1]
choices = c.services + c.containers
else:
return 0
print(' '.join(filter(lambda x: x.startswith(prefix), set(choices))))
return 0
try:
options.things = set(options.things)
getattr(c, options.command)(**vars(options))
except exceptions.MaestroException as e:
sys.stderr.write('{}\n'.format(e))
return 1
except KeyboardInterrupt:
return 1
if __name__ == '__main__':
sys.exit(main())
| lgpl-3.0 |
chen2aaron/SnirteneCodes | Fragment/MySQLdb_Pra.py | 1 | 1034 | import MySQLdb
conn = MySQLdb.connect(host="localhost", user="root", passwd="123456", db="cc", port=3306, charset="utf8")
cur = conn.cursor()
# cur.execute("insert into users (username,password,email) values (%s,%s,%s)",("python","123456","[email protected]"))
# conn.commit()
# cur.executemany("insert into users (username,password,email) values (%s,%s,%s)",(("google","111222","[email protected]"),("facebook","222333","[email protected]"),("github","333444","[email protected]"),("docker","444555","[email protected]")))
# conn.commit()
cur.execute("select * from users")
lines = cur.fetchall()
for line in lines:
print line
cur.execute("select * from users where id=7")
line_first = cur.fetchone()
print line_first
print lines
print cur.fetchall()
cur.execute("select * from users")
print cur.fetchone()
print cur.fetchone()
print cur.fetchone()
print "--------------"
cur.scroll(1)
print cur.fetchone()
cur.scroll(-2)
print cur.fetchone()
cur.scroll(1,"absolute")
print cur.fetchone()
print cur.fetchone()
print cur.fetchone()
cur.close()
conn.close()
| gpl-2.0 |
rxuriguera/bibtexIndexMaker | src/bibim/ie/tests/test_context.py | 1 | 3474 |
# Copyright 2010 Ramon Xuriguera
#
# This file is part of BibtexIndexMaker.
#
# BibtexIndexMaker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BibtexIndexMaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BibtexIndexMaker. If not, see <http://www.gnu.org/licenses/>.
import unittest #@UnresolvedImport
from bibim.ie.context import ContextResolver
from bibim.util.beautifulsoup import BeautifulSoup
from bibim.util.helpers import ContentCleaner
html = """
<html>
<head></head>
<body>
<table>
<tr>
<td>Field 01:</td>
<td>Value 01</td>
</tr>
<tr>
<td>Field 02:</td>
<td>Value 02</td>
</tr>
<tr>
        <td>Field 03 <sup>33</sup></td>
<td>Value 03</td>
</tr>
</table>
</body>
</html>
"""
class ContextResolverTest(unittest.TestCase):
def setUp(self):
self.cr = ContextResolver()
self.soup = ContentCleaner().clean_content(html)
self.element01 = self.soup.find('td', text='Value 01').parent
self.element02 = self.soup.find('td', text='Value 03').parent
def tearDown(self):
pass
def test_get_context(self):
context = self.cr.get_context(self.element01)
self.failUnless(context[u'Field 01:'] == 1)
def test_get_tree_context(self):
context = self.cr.get_context(self.element02)
self.failUnless(context[u'Field 03'] == 1)
self.failUnless(context[u'33'] == 1)
def test_merge_contexts(self):
context01 = {u'Field 01:':1}
context02 = {u'Field 01:':3, u'Field 02:':1, u'Field 03:':4}
merged = self.cr.merge_context(context01, context02)
self.failUnless(merged == {u'Field 02:': 1, u'Field 01:': 4,
u'Field 03:': 4})
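    # Note: merge_context sums per-string counts across contexts, which is
    # why u'Field 01:' above ends up with 1 + 3 = 4 occurrences.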
def test_clean_context(self):
context = {'a':2, 'b':3, 'c':1,
'this string is quite long. yes indeed':4}
result = self.cr.clean_context(context)
self.failUnless(result == {'a':2, 'b':3})
def test_get_top_words(self):
context = {u'a':3, 'b':5, 'c':1, u'd':2, 'e':4}
expected = ['b', 'e', u'a']
result = self.cr.get_top_strings(context, 3)
self.failUnless(result == expected)
def test_check_context(self):
context01 = {'a':3, 'b':5, 'c':1, 'd':2, 'e':4}
context02 = {'a':1, 'x':3}
result = self.cr.check_context(context01, context02)
self.failUnless(result)
context02 = {'x':3}
result = self.cr.check_context(context01, context02)
self.failIf(result)
context01 = {}
result = self.cr.check_context(context01, context02)
self.failUnless(result)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-3.0 |
Modified-MW-DF/modified-MDF | MWDF Project/MasterworkDwarfFortress/Utilities/Quickfort/src/qfconvert/xml2obj.py | 3 | 3404 | ## {{{ http://code.activestate.com/recipes/534109/ (r8)
import re
import xml.sax.handler
def xml2obj(src):
"""
    A simple function that converts XML data into a native Python object.
"""
non_id_char = re.compile('[^_0-9a-zA-Z]')
def _name_mangle(name):
return non_id_char.sub('_', name)
class DataNode(object):
def __init__(self):
self._attrs = {} # XML attributes and child elements
self.data = None # child text data
def __len__(self):
# treat single element as a list of 1
return 1
def __getitem__(self, key):
if isinstance(key, basestring):
return self._attrs.get(key, None)
else:
return [self][key]
def __contains__(self, name):
return name in self._attrs
def __nonzero__(self):
return bool(self._attrs or self.data)
def __getattr__(self, name):
if name.startswith('__'):
# need to do this for Python special methods???
raise AttributeError(name)
return self._attrs.get(name, None)
def _add_xml_attr(self, name, value):
if name in self._attrs:
# multiple attribute of the same name are represented by a list
children = self._attrs[name]
if not isinstance(children, list):
children = [children]
self._attrs[name] = children
children.append(value)
else:
self._attrs[name] = value
def __str__(self):
return self.data or ''
def __repr__(self):
items = sorted(self._attrs.items())
if self.data:
items.append(('data', self.data))
return u'{%s}' % ', '.join(
[u'%s:%s' % (k, repr(v)) for k, v in items]
)
class TreeBuilder(xml.sax.handler.ContentHandler):
def __init__(self):
self.stack = []
self.root = DataNode()
self.current = self.root
self.text_parts = []
def startElement(self, name, attrs):
self.stack.append((self.current, self.text_parts))
self.current = DataNode()
self.text_parts = []
# xml attributes --> python attributes
for k, v in attrs.items():
self.current._add_xml_attr(_name_mangle(k), v)
def endElement(self, name):
text = ''.join(self.text_parts).strip()
if text:
self.current.data = text
if self.current._attrs:
obj = self.current
else:
# a text only node is simply represented by the string
obj = text or ''
self.current, self.text_parts = self.stack.pop()
self.current._add_xml_attr(_name_mangle(name), obj)
def characters(self, content):
self.text_parts.append(content)
builder = TreeBuilder()
if isinstance(src, basestring):
xml.sax.parseString(src, builder)
else:
xml.sax.parse(src, builder)
return builder.root._attrs.values()[0]
## end of http://code.activestate.com/recipes/534109/ }}}
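# Illustrative usage (a sketch; attribute access mirrors the XML structure,
# with non-identifier characters in tag names mangled to underscores):
#
#   person = xml2obj('<person id="1"><name>Bob</name></person>')
#   person.id    # -> u'1'  (XML attribute)
#   person.name  # -> 'Bob' (a text-only child is stored as a plain string)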
| mit |
shsingh/ansible | lib/ansible/modules/cloud/misc/cloud_init_data_facts.py | 101 | 3392 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloud_init_data_facts
short_description: Retrieve facts of cloud-init.
description:
- Gathers facts by reading the status.json and result.json of cloud-init.
version_added: 2.6
author: René Moser (@resmo)
options:
filter:
description:
- Filter facts
choices: [ status, result ]
notes:
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
'''
EXAMPLES = '''
- name: Gather all facts of cloud init
cloud_init_data_facts:
register: result
- debug:
var: result
- name: Wait for cloud init to finish
cloud_init_data_facts:
filter: status
register: res
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
retries: 50
delay: 5
'''
RETURN = '''
---
cloud_init_data_facts:
description: Facts of result and status.
returned: success
type: dict
  sample: '{
      "status": {
          "v1": {
              "datasource": "DataSourceCloudStack",
              "errors": []
          }
      },
      "result": {
          "v1": {
              "datasource": "DataSourceCloudStack",
              "init": {
                  "errors": [],
                  "finished": 1522066377.0185432,
                  "start": 1522066375.2648022
              },
              "init-local": {
                  "errors": [],
                  "finished": 1522066373.70919,
                  "start": 1522066373.4726632
              },
              "modules-config": {
                  "errors": [],
                  "finished": 1522066380.9097016,
                  "start": 1522066379.0011985
              },
              "modules-final": {
                  "errors": [],
                  "finished": 1522066383.56594,
                  "start": 1522066382.3449218
              },
              "stage": null
          }
      }
  }'
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
CLOUD_INIT_PATH = "/var/lib/cloud/data/"
def gather_cloud_init_data_facts(module):
res = {
'cloud_init_data_facts': dict()
}
for i in ['result', 'status']:
filter = module.params.get('filter')
if filter is None or filter == i:
res['cloud_init_data_facts'][i] = dict()
json_file = CLOUD_INIT_PATH + i + '.json'
if os.path.exists(json_file):
f = open(json_file, 'rb')
contents = to_text(f.read(), errors='surrogate_or_strict')
f.close()
if contents:
res['cloud_init_data_facts'][i] = module.from_json(contents)
return res
def main():
module = AnsibleModule(
argument_spec=dict(
filter=dict(choices=['result', 'status']),
),
supports_check_mode=True,
)
facts = gather_cloud_init_data_facts(module)
result = dict(changed=False, ansible_facts=facts, **facts)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jit/pyew | vstruct/unittest.py | 17 | 2352 |
import vstruct
from cStringIO import StringIO
from vstruct.primitives import *
def test(vs, hexstr):
vshex = vs.vsEmit().encode('hex')
if vshex != hexstr:
raise Exception('FAIL')
print 'PASS!'
v = vstruct.VStruct()
v.uint8 = v_uint8(1)
v.uint16 = v_uint16(2)
v.uint24 = v_uint24(3)
v.uint32 = v_uint32(4)
v.uint64 = v_uint64(5)
v.vbytes = v_bytes(vbytes='ABCD')
test(v,'01020003000004000000050000000000000041424344')
print v.tree()
v.uint8 = 99
v.uint16 = 100
v.uint24 = 101
v.uint32 = 102
v.uint64 = 103
v.vbytes = '\x00\x00\x00\x00'
test(v,'63640065000066000000670000000000000000000000')
print v.tree()
# =================================================================
v = vstruct.VStruct()
v._vs_field_align = True
v.uint8 = v_uint8(0x42, bigend=True)
v.uint16 = v_uint16(0x4243, bigend=True)
v.uint24 = v_uint24(0x424344, bigend=True)
v.uint32 = v_uint32(0x42434445, bigend=True)
v.uint64 = v_uint64(0x4243444546474849, bigend=True)
test(v, '420042430000424344000000424344454243444546474849')
print v.tree()
# ===============================================================
v = vstruct.VStruct()
v.strfield = v_str(size=30)
v.unifield = v_wstr(size=30)
v.strfield = 'wootwoot!'
v.unifield = 'bazbaz'
test(v, '776f6f74776f6f7421000000000000000000000000000000000000000000620061007a00620061007a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000')
print v.tree()
v.vsParse('B'*90)
# ===============================================================
def updatelen(vs):
vs.vsGetField('strfield').vsSetLength(vs.lenfield)
v = vstruct.VStruct()
v.lenfield = v_uint8(0x30)
v.strfield = v_str(size=30)
v.vsAddParseCallback('lenfield', updatelen)
v.vsParse('\x01' + 'A' * 30)
test(v, '0141')
print v.tree()
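# The parse callback above resizes strfield to lenfield (1) bytes before it
# is parsed, so only the first 'A' is consumed and vsEmit() returns
# '\x01A' -- hex '0141'.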
# ==============================================================
class woot(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.lenfield = v_uint8()
self.strfield = v_str(size=0x20)
def pcb_lenfield(self):
self.vsGetField('strfield').vsSetLength(self.lenfield)
v = woot()
v.vsParse('\x01' + 'A'*30)
test(v, '0141')
print v.tree()
# ==============================================================
v = woot()
sio = StringIO('\x01' + 'A' * 30)
v.vsParseFd(sio)
test(v, '0141')
print v.tree()
| gpl-2.0 |
ksh/gpirecertification | tests/functional/model_student_work.py | 7 | 3838 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/review.py."""
__author__ = [
'[email protected] (John Cox)',
]
from models import entities
from models import models
from models import student_work
from tests.functional import actions
from google.appengine.ext import db
class ReferencedModel(entities.BaseEntity):
pass
class UnvalidatedReference(entities.BaseEntity):
referenced_model_key = student_work.KeyProperty()
class ValidatedReference(entities.BaseEntity):
referenced_model_key = student_work.KeyProperty(kind=ReferencedModel.kind())
class KeyPropertyTest(actions.TestBase):
"""Tests KeyProperty."""
def setUp(self): # From superclass. pylint: disable-msg=g-bad-name
super(KeyPropertyTest, self).setUp()
self.referenced_model_key = ReferencedModel().put()
def test_validation_and_datastore_round_trip_of_keys_succeeds(self):
"""Tests happy path for both validation and (de)serialization."""
model_with_reference = ValidatedReference(
referenced_model_key=self.referenced_model_key)
model_with_reference_key = model_with_reference.put()
model_with_reference_from_datastore = db.get(model_with_reference_key)
self.assertEqual(
self.referenced_model_key,
model_with_reference_from_datastore.referenced_model_key)
custom_model_from_datastore = db.get(
model_with_reference_from_datastore.referenced_model_key)
self.assertEqual(
self.referenced_model_key, custom_model_from_datastore.key())
self.assertTrue(isinstance(
model_with_reference_from_datastore.referenced_model_key,
db.Key))
def test_type_not_validated_if_kind_not_passed(self):
model_key = db.Model().put()
unvalidated = UnvalidatedReference(referenced_model_key=model_key)
self.assertEqual(model_key, unvalidated.referenced_model_key)
def test_validation_fails(self):
model_key = db.Model().put()
self.assertRaises(
db.BadValueError, ValidatedReference,
referenced_model_key='not_a_key')
self.assertRaises(
db.BadValueError, ValidatedReference,
referenced_model_key=model_key)
class ReviewTest(actions.TestBase):
def test_constructor_sets_key_name(self):
"""Tests construction of key_name, put of entity with key_name set."""
unit_id = 'unit_id'
reviewer_key = models.Student(key_name='[email protected]').put()
review_key = student_work.Review(
reviewer_key=reviewer_key, unit_id=unit_id).put()
self.assertEqual(
student_work.Review.key_name(unit_id, reviewer_key),
review_key.name())
class SubmissionTest(actions.TestBase):
def test_constructor_sets_key_name(self):
"""Tests construction of key_name, put of entity with key_name set."""
unit_id = 'unit_id'
reviewee_key = models.Student(key_name='[email protected]').put()
review_key = student_work.Submission(
reviewee_key=reviewee_key, unit_id=unit_id).put()
self.assertEqual(
student_work.Submission.key_name(unit_id, reviewee_key),
review_key.name())
| apache-2.0 |
eprivalov/sendec | loginsys/forms.py | 1 | 2847 | from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth
class UserCreationFormNew(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput(attrs={
'class': 'form-control',
'placeholder': 'Password',
'required': 'required'
}))
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput(attrs={
'class': 'form-control',
'required': 'required',
'placeholder': 'Confirm password'
}),
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationFormNew, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserAuthenticationForm(forms.ModelForm):
class Meta:
model = User
fields = ()
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.CharField(label=_("Username"),
widget=forms.TextInput(attrs={
'class': 'form-control',
'required': 'required',
'name': 'username',
'placeholder': "Username"
}))
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput(attrs={
'class': 'form-control',
'required': 'required',
'placeholder': 'Password',
'name': 'password'
}))
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = auth.authenticate(username=username, password=password)
if not user or not user.is_active:
raise forms.ValidationError("Sorry, that login was invalid. Please try again.")
return self.cleaned_data
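# A minimal sketch of how UserAuthenticationForm might back a login view
# (the view, template and URL names below are hypothetical):
#
#   def login_view(request):
#       form = UserAuthenticationForm(data=request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           user = auth.authenticate(
#               username=form.cleaned_data['username'],
#               password=form.cleaned_data['password'])
#           auth.login(request, user)
#           return redirect('home')
#       return render(request, 'login.html', {'form': form})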
| apache-2.0 |
nadeemsyed/swift | test/unit/common/middleware/test_quotas.py | 8 | 15565 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request, HTTPUnauthorized, HTTPOk
from swift.common.middleware import container_quotas, copy
from test.unit.common.middleware.helpers import FakeSwift
class FakeCache(object):
def __init__(self, val):
if 'status' not in val:
val['status'] = 200
self.val = val
def get(self, *args):
return self.val
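# FakeCache stands in for the 'swift.cache' environ entry: the quota
# middleware looks up container info (bytes used, object count and quota
# metadata) through it instead of contacting a real container server.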
class FakeApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('200 OK', [])
return []
class FakeMissingApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('404 Not Found', [])
return []
def start_response(*args):
pass
class TestContainerQuotas(unittest.TestCase):
def test_split_path_empty_container_path_segment(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a//something/something_else',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': {'key': 'value'}})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_not_handled(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_no_quotas(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': FakeCache({}),
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_not_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_not_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
def test_invalid_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 400)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 400)
def test_valid_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
def test_delete_quotas(self):
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': None})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEqual(res.status_int, 200)
def test_missing_container(self):
app = container_quotas.ContainerQuotaMiddleware(FakeMissingApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 404)
def test_auth_fail(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'},
'write_acl': None})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.authorize': lambda *args: HTTPUnauthorized()})
res = req.get_response(app)
self.assertEqual(res.status_int, 401)
class ContainerQuotaCopyingTestCases(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.cq_filter = container_quotas.filter_factory({})(self.app)
self.copy_filter = copy.filter_factory({})(self.cq_filter)
def test_exceed_bytes_quota_copy_verb(self):
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_exceed_counts_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_exceed_counts_quota_copy_cross_account_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'},
'status': 200, 'object_count': 1}
a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},
'status': 200, 'object_count': 1}
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.infocache': {
'container/a/c': a_c_cache,
'container/a2/c': a2_c_cache}},
headers={'Destination': '/c/o',
'Destination-Account': 'a2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_exceed_counts_quota_copy_cross_account_PUT_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {}, 'passed')
a_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '2'},
'status': 200, 'object_count': 1}
a2_c_cache = {'storage_policy': '0', 'meta': {'quota-count': '1'},
'status': 200, 'object_count': 1}
req = Request.blank('/v1/a2/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.infocache': {
'container/a/c': a_c_cache,
'container/a2/c': a2_c_cache}},
headers={'X-Copy-From': '/c2/o2',
'X-Copy-From-Account': 'a'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_exceed_bytes_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_bytes_quota_copy_from_no_src(self):
self.app.register('GET', '/v1/a/c2/o3', HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o3'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_bytes_quota_copy_from_bad_src(self):
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': 'bad_path'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 412)
def test_exceed_counts_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, 'Upload exceeds quota.')
def test_not_exceed_counts_quota_copy_from(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache},
headers={'x-copy-from': '/c2/o2'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
def test_not_exceed_counts_quota_copy_verb(self):
self.app.register('GET', '/v1/a/c2/o2', HTTPOk,
{'Content-Length': '10'}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', HTTPOk, {}, 'passed')
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
'swift.cache': cache},
headers={'Destination': '/c/o'})
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dancingdan/tensorflow | tensorflow/compiler/tests/pooling_ops_test.py | 22 | 20324 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
"""Convert the input from NHWC format to NCHW.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 3, 1, 2])
else:
return [input_tensor[0], input_tensor[3], input_tensor[1], input_tensor[2]]
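# For example, a [batch, height, width, channels] shape such as [2, 4, 4, 3]
# becomes [2, 3, 4, 4]; the same permutation is applied to ksize/strides
# lists so pooling parameters stay aligned with the chosen data layout.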
def NCHWToNHWC(input_tensor):
"""Convert the input from NCHW format to NHWC.
Args:
input_tensor: a 4-D tensor, or a 4-element array representing the same.
Returns:
the converted tensor or a shape array
"""
if isinstance(input_tensor, ops.Tensor):
return array_ops.transpose(input_tensor, [0, 2, 3, 1])
else:
return [input_tensor[0], input_tensor[2], input_tensor[3], input_tensor[1]]
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs
"""
test_configs = ["NHWC", "NCHW"]
return test_configs
class PoolingTest(xla_test.XLATestCase):
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, currently only co.MaxPool.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
"""
total_size = np.prod(input_sizes)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
x = x.reshape(input_sizes)
with self.cached_session() as sess:
with self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = inputs
if data_format == "NCHW":
t = NHWCToNCHW(t)
ksize = NHWCToNCHW(ksize)
strides = NHWCToNCHW(strides)
t = pool_func(t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
t = NCHWToNHWC(t)
actual = sess.run(t, {inputs: x})
self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected)
def testMaxPoolValidPadding(self):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testMaxPoolSamePadding(self):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingNonSquareWindow(self):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0])
def testMaxPoolValidPaddingUnevenStride(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
def testMaxPoolSamePaddingFilter4(self):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
def testMaxPoolSamePaddingFilter8(self):
expected_output = [
145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
# Tests for DepthwiseMaxPooling on CPU only.
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0])
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
def testKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33])
def testKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9])
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11])
# Average pooling
def testAvgPoolValidPadding(self):
expected_output = [7, 8, 9]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output)
def testAvgPoolSamePadding(self):
expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output)
class PoolGradTest(xla_test.XLATestCase):
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
def _VerifyOneTest(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
pool_grad_func: Pooling gradient function for pool_grad_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
pool_grad_grad_func: Second-order gradient function, if available.
"""
total_size = np.prod(input_sizes)
# TODO(b/73062247): MaxPoolGradGrad can confuse gradients when x is equally
# maximal at 16 bits. Switch to np.random.randn when resolved.
x = np.arange(1, total_size + 1, dtype=np.float32)
x *= (np.random.randint(2, size=total_size) * 2 - 1) # Flip signs randomly
# Verify some specifically interesting values...
x[np.random.choice(total_size)] = np.inf
x[np.random.choice(total_size)] = -np.inf
# TODO(b/74222344): Fix nan handling for max pool grad.
# x[np.random.choice(total_size)] = np.nan
x = x.reshape(input_sizes)
with self.cached_session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device(self.CPU_DEVICE):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device(self.CPU_DEVICE):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
xla_inputs = inputs
xla_outputs = outputs
xla_output_gradients = output_gradients
xla_output_grad_gradients = output_grad_gradients
xla_ksize = ksize
xla_strides = strides
if data_format == "NCHW":
xla_inputs = NHWCToNCHW(inputs)
xla_outputs = NHWCToNCHW(outputs)
xla_output_gradients = NHWCToNCHW(output_gradients)
xla_output_grad_gradients = NHWCToNCHW(output_grad_gradients)
xla_ksize = NHWCToNCHW(ksize)
xla_strides = NHWCToNCHW(strides)
actual_input_gradients = pool_grad_func(
xla_inputs,
xla_outputs,
xla_output_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_input_gradients = NCHWToNHWC(actual_input_gradients)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
xla_inputs,
xla_outputs,
xla_output_grad_gradients,
ksize=xla_ksize,
strides=xla_strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
actual_grad_gradients = NCHWToNHWC(actual_grad_gradients)
actual_input_gradients_vals = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals,
actual_input_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_input_gradients_vals, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def _VerifyValues(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling function.
Args:
pool_func: Pooling function to be called, e.g., tf.nn.max_pool
pool_grad_func: Corresponding pooling gradient function.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
for data_format in GetTestConfigs():
self._VerifyOneTest(
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
data_format,
pool_grad_grad_func=pool_grad_grad_func)
def _TestPooling(self, forward_op, backward_op, pool_grad_grad_func=None):
# VALID padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, non square window
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# VALID padding, uneven stride
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 4 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
# SAME padding, size 8 input
self._VerifyValues(
forward_op,
backward_op,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
pool_grad_grad_func=pool_grad_grad_func)
def testMaxPool(self):
self._TestPooling(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
pool_grad_grad_func=gen_nn_ops.max_pool_grad_grad)
def testAvgPool(self):
# Wrapper around AvgPoolGrad that ignores extra arguments needed by
# MaxPoolGrad.
def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
data_format):
del outputs # Unused by average-pooling gradients.
return gen_nn_ops.avg_pool_grad(
inputs.get_shape().as_list(),
output_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
# The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
# the stride size, so we only run the following tests on MaxPoolGrad.
def testMaxPoolKernelSmallerThanStrideValid(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID")
def testMaxPoolKernelSmallerThanStrideSame(self):
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
self._VerifyValues(
nn_ops.max_pool,
gen_nn_ops.max_pool_grad,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME")
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
a-doumoulakis/tensorflow | tensorflow/examples/tutorials/mnist/mnist_deep.py | 33 | 6130 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
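# Per-example shape progression through deepnn (SAME-padded convs keep the
# spatial dims; each 2x2 max-pool halves them):
#   784 -> 28x28x1 -> conv1: 28x28x32 -> pool1: 14x14x32
#       -> conv2: 14x14x64 -> pool2: 7x7x64 -> fc1: 1024 -> fc2: 10 logits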
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/requests-2.2.1-py2.7.egg/requests/packages/urllib3/response.py | 316 | 10537 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`HTTPResponse.read` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
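    # Hedged example of the caching rules documented above (`r` is assumed to
    # be an HTTPResponse built with preload_content=False):
    #
    #     head = r.read(amt=64)              # partial read, never cached
    #     rest = r.read(cache_content=True)  # remainder is stored in _body
    #     assert rest == r.data              # .data keeps working afterwards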
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
            How much of the content to read. The generator will return up to
            ``amt`` bytes of data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
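    # Sketch of consuming the generator above (names are illustrative):
    #
    #     with open('out.bin', 'wb') as sink:
    #         for chunk in resp.stream(amt=2**10, decode_content=True):
    #             sink.write(chunk)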
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# Normalize headers between different versions of Python
headers = {}
for k, v in r.getheaders():
# Python 3: Header keys are returned capitalised
k = k.lower()
has_value = headers.get(k)
if has_value: # Python 3: Repeating header keys are unmerged.
v = ', '.join([has_value, v])
headers[k] = v
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
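    # Hedged example of the conversion above; `conn` is assumed to be an
    # already-connected httplib.HTTPConnection:
    #
    #     conn.request('GET', '/')
    #     resp = HTTPResponse.from_httplib(conn.getresponse(),
    #                                      preload_content=False)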
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
| gpl-2.0 |
jhoos/django | tests/unmanaged_models/tests.py | 296 | 2174 | from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):
def test_simple(self):
"""
        The main test here is that all the models can be created without
        any database errors. We can also do some simple insertion and
        lookup tests whilst we're here to show that the second set of models
        does refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a = [a]
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertIsInstance(a2, A02)
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertIsInstance(b2, B02)
self.assertEqual(b2.f_a, "fred")
self.assertIsInstance(b2.fk_a, A02)
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertIsInstance(resp[0], C02)
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertNotIn(table, tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertIn(table, tables, "Table '%s' does not exist." % table)
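# For reference, a hedged sketch of the model pattern these tests rely on
# (the real definitions live in .models; the field names below are only
# illustrative):
#
#     class Unmanaged2(models.Model):
#         mm = models.ManyToManyField('Unmanaged1')
#
#         class Meta:
#             managed = False  # Django will not create or drop this table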
| bsd-3-clause |
UASLab/ImageAnalysis | video/import_apt.py | 1 | 1267 | #!/usr/bin/env python3
import argparse
import gzip
argparser = argparse.ArgumentParser(description='import apt.dat.gz from FlightGear')
argparser.add_argument('--file', help='fgfs apt.dat.gz file')
args = argparser.parse_args()
ft2m = 0.3048
ident = ''
alt = ''
count = 0
lat_sum = 0
lon_sum = 0
print('Ident,Lat,Lon,Alt')
with gzip.open(args.file, 'rt') as f:  # text mode: lines are str under python3
for line in f:
tokens = line.split()
        #print(tokens)
if len(tokens) and tokens[0] == '1':
# start of apt record
if count > 0:
# output last record
                print('%s,%.8f,%.8f,%.0f' % (ident, lat_sum / count,
                                             lon_sum / count, alt))
ident = tokens[4]
alt = float(tokens[1]) * ft2m
count = 0
lat_sum = 0
lon_sum = 0
elif len(tokens) and tokens[0] == '100':
# basic data
lat_sum += float(tokens[9])
lon_sum += float(tokens[10])
lat_sum += float(tokens[18])
lon_sum += float(tokens[19])
count += 2
if count > 0:
# output last record
    print('%s,%.8f,%.8f,%.0f' % (ident, lat_sum / count,
                                 lon_sum / count, alt))
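# Hedged usage sketch (the apt.dat.gz path below is illustrative, not a fixed
# install location):
#
#     ./import_apt.py --file ~/fgfs/Airports/apt.dat.gz > airports.csv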
| mit |
viz-dev/viz | qa/rpc-tests/mempool_reorg.py | 41 | 4514 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, nodes[0]'s blocks
        # 101, 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
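        # The two raw-hex edits above implement the lock: the input's
        # nSequence is moved off 0xffffffff so nLockTime is enforced, and the
        # final 4 bytes (nLockTime, little-endian) are set to the current
        # height + 2, making the transaction invalid until that height.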
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit |
gurneyalex/odoo | addons/event_sale/__manifest__.py | 5 | 1311 | # -*- coding: utf-8 -*-
{
'name': 'Events Sales',
'version': '1.1',
'category': 'Marketing/Events',
'website': 'https://www.odoo.com/page/events',
'description': """
Creating registration with sales orders.
========================================
This module allows you to automate and connect your registration creation with
your main sale flow and therefore, to enable the invoicing feature of registrations.
It defines a new kind of service product that offers you the possibility to
choose an event category associated with it. When you encode a sales order for
that product, you will be able to choose an existing event of that category and
when you confirm your sales order it will automatically create a registration for
this event.
""",
'depends': ['event', 'sale_management'],
'data': [
'views/assets.xml',
'views/event_views.xml',
'views/product_views.xml',
'views/sale_order_views.xml',
'data/event_sale_data.xml',
'report/event_event_templates.xml',
'security/ir.model.access.csv',
'security/event_security.xml',
'wizard/event_edit_registration.xml',
'wizard/event_configurator_views.xml',
],
'demo': ['data/event_demo.xml'],
'installable': True,
'auto_install': True
}
| agpl-3.0 |
OpenTrons/opentrons_sdk | api/src/opentrons/api/calibration.py | 1 | 11815 | import functools
import logging
from copy import copy
from typing import Optional
from opentrons.util import calibration_functions
from opentrons.config import feature_flags as ff
from opentrons.broker import Broker
from opentrons.types import Point, Mount, Location
from opentrons.protocol_api import labware
from opentrons.hardware_control import CriticalPoint, ThreadedAsyncLock
from .models import Container
from .util import robot_is_busy, RobotBusy
log = logging.getLogger(__name__)
VALID_STATES = {'probing', 'moving', 'ready'}
# This hack is because if you have an old container that uses Placeable with
# just one well, Placeable.wells() returns the Well rather than [Well].
# Passing the well as an argument, though, will always return the well.
def _well0(cont):
if isinstance(cont, labware.Labware):
return cont.wells()[0]
else:
return cont.wells(0)
def _home_if_first_call(func):
""" Decorator to make a function home if it is the first one called in
this session."""
@functools.wraps(func)
def decorated(*args, **kwargs):
self = args[0]
if not self._has_homed:
log.info("this is the first calibration action, homing")
self._hardware.home()
self._has_homed = True
return func(*args, **kwargs)
return decorated
class CalibrationManager(RobotBusy):
"""
Serves endpoints that are primarily used in
opentrons/app/ui/robot/api-client/client.js
"""
TOPIC = 'calibration'
def __init__(self, hardware, loop=None, broker=None, lock=None):
self._broker = broker or Broker()
self._hardware = hardware
self._loop = loop
self.state = None
self._lock = lock
self._has_homed = False
@property
def busy_lock(self) -> ThreadedAsyncLock:
return self._lock
def _set_state(self, state):
if state not in VALID_STATES:
raise ValueError(
'State {0} not in {1}'.format(state, VALID_STATES))
self.state = state
self._on_state_changed()
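    # Note: every state transition above publishes a snapshot of this manager
    # on the 'calibration' broker topic (via _on_state_changed/_snapshot
    # below), which is how UI clients observe probing/moving/ready changes.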
@robot_is_busy
@_home_if_first_call
def tip_probe(self, instrument):
inst = instrument._instrument
log.info('Probing tip with {}'.format(instrument.name))
self._set_state('probing')
if instrument._context:
instrument._context.location_cache = None
mount = Mount[instrument._instrument.mount.upper()]
assert instrument.tip_racks,\
'No known tipracks for {}'.format(instrument)
tip_length = inst._tip_length_for(
instrument.tip_racks[0]._container)
# TODO (tm, 2019-04-22): This warns "coroutine not awaited" in
# TODO: test. The test fixture probably needs to be modified to get
# TODO: a synchronous adapter instead of a raw hardware_control API
measured_center = self._hardware.locate_tip_probe_center(
mount, tip_length)
else:
measured_center = calibration_functions.probe_instrument(
instrument=inst,
robot=inst.robot)
log.info('Measured probe top center: {0}'.format(measured_center))
if instrument._context:
self._hardware.update_instrument_offset(
Mount[instrument._instrument.mount.upper()],
from_tip_probe=measured_center)
config = self._hardware.config
else:
config = calibration_functions.update_instrument_config(
instrument=inst,
measured_center=measured_center)
log.info('New config: {0}'.format(config))
self._move_to_front(instrument)
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def pick_up_tip(self, instrument, container):
if not isinstance(container, Container):
raise ValueError(
'Invalid object type {0}. Expected models.Container'
.format(type(container)))
inst = instrument._instrument
log.info('Picking up tip from {} in {} with {}'.format(
container.name, container.slot, instrument.name))
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
loc = _well0(container._container)
instrument._context.location_cache =\
Location(self._hardware.gantry_position(
Mount[inst.mount.upper()],
critical_point=CriticalPoint.NOZZLE,
refresh=True),
loc)
loc_leg = _well0(container._container)
inst.pick_up_tip(loc_leg)
else:
inst.pick_up_tip(_well0(container._container))
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def drop_tip(self, instrument, container):
if not isinstance(container, Container):
raise ValueError(
'Invalid object type {0}. Expected models.Container'
.format(type(container)))
inst = instrument._instrument
log.info('Dropping tip from {} in {} with {}'.format(
container.name, container.slot, instrument.name))
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
instrument._context.location_cache = None
inst.drop_tip(_well0(container._container))
else:
inst.drop_tip(_well0(container._container))
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def return_tip(self, instrument):
inst = instrument._instrument
log.info('Returning tip from {}'.format(instrument.name))
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
instrument._context.location_cache = None
inst.return_tip()
else:
inst.return_tip()
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def move_to_front(self, instrument):
"""Public face of move_to_front"""
self._move_to_front(instrument)
def _move_to_front(self, instrument):
"""Private move_to_front that can be called internally"""
inst = instrument._instrument
log.info('Moving {}'.format(instrument.name))
self._set_state('moving')
if instrument._context:
current = self._hardware.gantry_position(
Mount[inst.mount.upper()],
critical_point=CriticalPoint.NOZZLE,
refresh=True)
dest = instrument._context.deck.position_for(5) \
.point._replace(z=150)
self._hardware.move_to(Mount[inst.mount.upper()],
current,
critical_point=CriticalPoint.NOZZLE)
self._hardware.move_to(Mount[inst.mount.upper()],
dest._replace(z=current.z),
critical_point=CriticalPoint.NOZZLE)
self._hardware.move_to(Mount[inst.mount.upper()],
dest, critical_point=CriticalPoint.NOZZLE)
else:
calibration_functions.move_instrument_for_probing_prep(
inst, inst.robot)
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def move_to(self, instrument, container):
if not isinstance(container, Container):
raise ValueError(
'Invalid object type {0}. Expected models.Container'
.format(type(container)))
inst = instrument._instrument
cont = container._container
target = _well0(cont).top()
log.info('Moving {} to {} in {}'.format(
instrument.name, container.name, container.slot))
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
instrument._context.location_cache = None
inst.move_to(target)
else:
inst.move_to(target)
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def jog(self, instrument, distance, axis):
inst = instrument._instrument
log.info('Jogging {} by {} in {}'.format(
instrument.name, distance, axis))
self._set_state('moving')
if instrument._context:
self._hardware.move_rel(
Mount[inst.mount.upper()], Point(**{axis: distance}))
else:
calibration_functions.jog_instrument(
instrument=inst,
distance=distance,
axis=axis,
robot=inst.robot)
self._set_state('ready')
@robot_is_busy
@_home_if_first_call
def home(self, instrument):
inst = instrument._instrument
log.info('Homing {}'.format(instrument.name))
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
instrument._context.location_cache = None
inst.home()
else:
inst.home()
self._set_state('ready')
@robot_is_busy
def home_all(self, instrument):
# NOTE: this only takes instrument as a param, because we need
# its reference to the ProtocolContext. This is code smell that
# will be removed once sessions are managed better
log.info('Homing via Calibration Manager')
self._set_state('moving')
if instrument._context:
with instrument._context.temp_connect(self._hardware):
instrument._context.home()
else:
self._hardware.home()
self._set_state('ready')
@robot_is_busy
def update_container_offset(self, container, instrument):
inst = instrument._instrument
log.info('Updating {} in {}'.format(container.name, container.slot))
if instrument._context:
if 'centerMultichannelOnWells' in container._container.quirks:
cp: Optional[CriticalPoint] = CriticalPoint.XY_CENTER
else:
cp = None
here = self._hardware.gantry_position(Mount[inst.mount.upper()],
critical_point=cp,
refresh=True)
# Reset calibration so we don’t actually calibrate the offset
# relative to the old calibration
container._container.set_calibration(Point(0, 0, 0))
if ff.calibrate_to_bottom() and not (
container._container.is_tiprack):
orig = _well0(container._container)._bottom().point
else:
orig = _well0(container._container)._top().point
delta = here - orig
labware.save_calibration(container._container, delta)
else:
inst.robot.calibrate_container_with_instrument(
container=container._container,
instrument=inst,
save=True
)
def _snapshot(self):
return {
'topic': CalibrationManager.TOPIC,
'name': 'state',
'payload': copy(self)
}
def _on_state_changed(self):
self._hardware._use_safest_height = (self.state in
['probing', 'moving'])
self._broker.publish(CalibrationManager.TOPIC, self._snapshot())
| apache-2.0 |
NeuralEnsemble/python-neo | neo/core/baseneo.py | 2 | 14038 | """
This module defines :class:`BaseNeo`, the abstract base class
used by all :module:`neo.core` classes.
"""
from copy import deepcopy
from datetime import datetime, date, time, timedelta
from decimal import Decimal
import logging
from numbers import Number
import numpy as np
ALLOWED_ANNOTATION_TYPES = (int, float, complex,
str, bytes,
type(None),
datetime, date, time, timedelta,
Number, Decimal,
np.number, np.bool_)
logger = logging.getLogger("Neo")
class MergeError(Exception):
pass
def _check_annotations(value):
"""
Recursively check that value is either of a "simple" type (number, string,
date/time) or is a (possibly nested) dict, list or numpy array containing
only simple types.
"""
if isinstance(value, np.ndarray):
if not issubclass(value.dtype.type, ALLOWED_ANNOTATION_TYPES):
raise ValueError("Invalid annotation. NumPy arrays with dtype %s"
"are not allowed" % value.dtype.type)
elif isinstance(value, dict):
for element in value.values():
_check_annotations(element)
elif isinstance(value, (list, tuple)):
for element in value:
_check_annotations(element)
elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):
raise ValueError("Invalid annotation. Annotations of type %s are not"
"allowed" % type(value))
def merge_annotation(a, b):
"""
First attempt at a policy for merging annotations (intended for use with
parallel computations using MPI). This policy needs to be discussed
further, or we could allow the user to specify a policy.
Current policy:
For arrays or lists: concatenate
For dicts: merge recursively
For strings: concatenate with ';'
Otherwise: fail if the annotations are not equal
"""
assert type(a) == type(b), 'type({}) {} != type({}) {}'.format(a, type(a),
b, type(b))
if isinstance(a, dict):
return merge_annotations(a, b)
elif isinstance(a, np.ndarray): # concatenate b to a
return np.append(a, b)
elif isinstance(a, list): # concatenate b to a
return a + b
elif isinstance(a, str):
if a == b:
return a
else:
return a + ";" + b
else:
assert a == b, '{} != {}'.format(a, b)
return a
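# Illustrative outcomes of the policy above (not exhaustive):
#
#     merge_annotation([1, 2], [3])   -> [1, 2, 3]
#     merge_annotation('a', 'b')      -> 'a;b'
#     merge_annotation(1.0, 2.0)      -> AssertionError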
def merge_annotations(A, *Bs):
"""
Merge two sets of annotations.
Merging follows these rules:
All keys that are in A or B, but not both, are kept.
For keys that are present in both:
For arrays or lists: concatenate
For dicts: merge recursively
For strings: concatenate with ';'
    Otherwise: replace the value with "MERGE CONFLICT" if the annotations are not equal
"""
merged = A.copy()
for B in Bs:
for name in B:
if name not in merged:
merged[name] = B[name]
else:
try:
merged[name] = merge_annotation(merged[name], B[name])
except BaseException as exc:
# exc.args += ('key %s' % name,)
# raise
merged[name] = "MERGE CONFLICT" # temporary hack
logger.debug("Merging annotations: A=%s Bs=%s merged=%s", A, Bs, merged)
return merged
def intersect_annotations(A, B):
"""
Identify common entries in dictionaries A and B
and return these in a separate dictionary.
Entries have to share key as well as value to be
considered common.
Parameters
----------
A, B : dict
Dictionaries to merge.
"""
result = {}
for key in set(A.keys()) & set(B.keys()):
v1, v2 = A[key], B[key]
assert type(v1) == type(v2), 'type({}) {} != type({}) {}'.format(v1, type(v1),
v2, type(v2))
if isinstance(v1, dict) and v1 == v2:
result[key] = deepcopy(v1)
elif isinstance(v1, str) and v1 == v2:
result[key] = A[key]
elif isinstance(v1, list) and v1 == v2:
result[key] = deepcopy(v1)
elif isinstance(v1, np.ndarray) and all(v1 == v2):
result[key] = deepcopy(v1)
return result
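# Worked example of the intersection above; note that only dict, str, list
# and ndarray values are ever copied, so equal scalars are silently dropped:
#
#     intersect_annotations({'id': 'x', 'n': 1}, {'id': 'x', 'n': 1})
#     # -> {'id': 'x'}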
def _reference_name(class_name):
"""
Given the name of a class, return an attribute name to be used for
references to instances of that class.
For example, a Segment object has a parent Block object, referenced by
`segment.block`. The attribute name `block` is obtained by calling
    `_reference_name("Block")`.
"""
return class_name.lower()
def _container_name(class_name):
"""
Given the name of a class, return an attribute name to be used for
lists (or other containers) containing instances of that class.
For example, a Block object contains a list of Segment objects,
referenced by `block.segments`. The attribute name `segments` is
    obtained by calling `_container_name("Segment")`.
"""
return _reference_name(class_name) + 's'
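# For example, matching the docstrings above:
#
#     _reference_name('Block')    -> 'block'
#     _container_name('Segment')  -> 'segments'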
class BaseNeo:
"""
This is the base class from which all Neo objects inherit.
This class implements support for universally recommended arguments,
and also sets up the :attr:`annotations` dict for additional arguments.
Each class can define one or more of the following class attributes:
:_parent_objects: Neo objects that can be parents of this
object. Note that no Neo object can have
more than one parent.
An instance attribute named
class.__name__.lower() will be automatically
defined to hold this parent and will be
initialized to None.
:_necessary_attrs: A list of tuples containing the attributes that the
class must have. The tuple can have 2-4 elements.
The first element is the attribute name.
The second element is the attribute type.
The third element is the number of dimensions
(only for numpy arrays and quantities).
The fourth element is the dtype of array
(only for numpy arrays and quantities).
This does NOT include the attributes holding the
parents or children of the object.
:_recommended_attrs: A list of tuples containing the attributes that
the class may optionally have. It uses the same
structure as :_necessary_attrs:
:_repr_pretty_attrs_keys_: The names of attributes printed when
pretty-printing using iPython.
The following helper properties are available:
:_parent_containers: The names of the container attributes used
to store :_parent_objects:
:parents: All objects that are parents of the current object.
:_all_attrs: All required and optional attributes.
:_necessary_attrs: + :_recommended_attrs:
The following "universal" methods are available:
:__init__: Grabs the universally recommended arguments :attr:`name`,
:attr:`file_origin`, and :attr:`description` and stores them as
attributes.
Also takes every additional argument (that is, every argument
that is not handled by :class:`BaseNeo` or the child class), and
puts in the dict :attr:`annotations`.
:annotate(**args): Updates :attr:`annotations` with keyword/value
pairs.
:merge(**args): Merge the contents of another object into this one.
The merge method implemented here only merges
annotations (see :merge_annotations:).
        Subclasses should implement their own merge rules.
:merge_annotations(**args): Merge the :attr:`annotations` of another
object into this one.
Each child class should:
0) describe its parents (if any) and attributes in the relevant
class attributes. :_recommended_attrs: should append
BaseNeo._recommended_attrs to the end.
1) call BaseNeo.__init__(self, name=name, description=description,
file_origin=file_origin, **annotations)
with the universal recommended arguments, plus optional annotations
2) process its required arguments in its __new__ or __init__ method
3) process its non-universal recommended arguments (in its __new__ or
__init__ method
Non-keyword arguments should only be used for required arguments.
The required and recommended arguments for each child class (Neo object)
are specified in the _necessary_attrs and _recommended_attrs attributes and
documentation for the child object.
"""
# these attributes control relationships, they need to be
# specified in each child class
# Parent objects whose children can have a single parent
_parent_objects = ()
# Attribute names corresponding to _parent_objects
_parent_attrs = ()
# Attributes that an instance is required to have defined
_necessary_attrs = ()
    # Attributes that an instance may or may not have defined
_recommended_attrs = (('name', str),
('description', str),
('file_origin', str))
# Attributes that are used for pretty-printing
_repr_pretty_attrs_keys_ = ("name", "description", "annotations")
def __init__(self, name=None, description=None, file_origin=None,
**annotations):
"""
This is the base constructor for all Neo objects.
Stores universally recommended attributes and creates
:attr:`annotations` from additional arguments not processed by
:class:`BaseNeo` or the child class.
"""
# create `annotations` for additional arguments
_check_annotations(annotations)
self.annotations = annotations
# these attributes are recommended for all objects.
self.name = name
self.description = description
self.file_origin = file_origin
# initialize parent containers
for parent in self._parent_containers:
setattr(self, parent, None)
def annotate(self, **annotations):
"""
Add annotations (non-standardized metadata) to a Neo object.
Example:
        >>> obj.annotate(key1=value1, key2=value2)
        >>> obj.annotations['key2']
        value2
"""
_check_annotations(annotations)
self.annotations.update(annotations)
def _has_repr_pretty_attrs_(self):
return any(getattr(self, k) for k in self._repr_pretty_attrs_keys_)
def _repr_pretty_attrs_(self, pp, cycle):
first = True
for key in self._repr_pretty_attrs_keys_:
value = getattr(self, key)
if value:
if first:
first = False
else:
pp.breakable()
with pp.group(indent=1):
pp.text("{}: ".format(key))
pp.pretty(value)
def _repr_pretty_(self, pp, cycle):
"""
Handle pretty-printing the :class:`BaseNeo`.
"""
pp.text(self.__class__.__name__)
if self._has_repr_pretty_attrs_():
pp.breakable()
self._repr_pretty_attrs_(pp, cycle)
@property
def _parent_containers(self):
"""
Containers for parent objects.
"""
return tuple([_reference_name(parent) for parent in
self._parent_objects])
@property
def parents(self):
"""
All parent objects storing the current object.
"""
return tuple([getattr(self, attr) for attr in
self._parent_containers])
@property
def _all_attrs(self):
"""
Returns a combination of all required and recommended
attributes.
"""
return self._necessary_attrs + self._recommended_attrs
def merge_annotations(self, *others):
"""
Merge annotations from the other object into this one.
Merging follows these rules:
All keys that are in the either object, but not both, are kept.
For keys that are present in both objects:
For arrays or lists: concatenate the two arrays
For dicts: merge recursively
For strings: concatenate with ';'
Otherwise: fail if the annotations are not equal
"""
other_annotations = [other.annotations for other in others]
merged_annotations = merge_annotations(self.annotations,
*other_annotations)
self.annotations.update(merged_annotations)
def merge(self, *others):
"""
Merge the contents of another object into this one.
See :meth:`merge_annotations` for details of the merge operation.
"""
self.merge_annotations(*others)
def set_parent(self, obj):
"""
Set the appropriate "parent" attribute of this object
according to the type of "obj"
"""
if obj.__class__.__name__ not in self._parent_objects:
raise TypeError("{} can only have parents of type {}, not {}".format(
self.__class__.__name__, self._parent_objects, obj.__class__.__name__))
loc = self._parent_objects.index(obj.__class__.__name__)
parent_attr = self._parent_attrs[loc]
setattr(self, parent_attr, obj)
| bsd-3-clause |
mattpap/sympy-polys | sympy/concrete/products.py | 1 | 3802 | from sympy.core import Expr, S, C, Mul, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Expr):
"""Represents unevaluated product.
"""
def __new__(cls, term, *symbols, **assumptions):
term = sympify(term)
if term.is_Number:
if term is S.NaN:
return S.NaN
elif term is S.Infinity:
return S.NaN
elif term is S.NegativeInfinity:
return S.NaN
elif term is S.Zero:
return S.Zero
elif term is S.One:
return S.One
if len(symbols) == 1:
symbol = symbols[0]
if isinstance(symbol, C.Equality):
k = symbol.lhs
a = symbol.rhs.start
n = symbol.rhs.end
elif isinstance(symbol, (tuple, list)):
k, a, n = symbol
else:
raise ValueError("Invalid arguments")
k, a, n = map(sympify, (k, a, n))
if isinstance(a, C.Number) and isinstance(n, C.Number):
return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
else:
raise NotImplementedError
obj = Expr.__new__(cls, **assumptions)
obj._args = (term, k, a, n)
return obj
@property
def term(self):
return self._args[0]
@property
def index(self):
return self._args[1]
@property
def lower(self):
return self._args[2]
@property
def upper(self):
return self._args[3]
def doit(self, **hints):
term = self.term
lower = self.lower
upper = self.upper
if hints.get('deep', True):
term = term.doit(**hints)
lower = lower.doit(**hints)
upper = upper.doit(**hints)
prod = self._eval_product(lower, upper, term)
if prod is not None:
return powsimp(prod)
else:
return self
def _eval_product(self, a, n, term):
from sympy import sum, Sum
k = self.index
if not term.has(k):
return term**(n-a+1)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
            C_ = poly.LC()
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a-r, n-a+1)
Q *= n - r
if len(all_roots) < poly.degree():
B = Product(quo(poly, Q.as_poly(k)), (k, a, n))
            return C_**(n-a+1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(a, n, p)
q = self._eval_product(a, n, q)
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(a, n, t)
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
A, B = Mul(*exclude), Mul(*include)
return A * Product(B, (k, a, n))
elif term.is_Pow:
if not term.base.has(k):
s = sum(term.exp, (k, a, n))
if not isinstance(s, Sum):
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(a, n, term.base)
if p is not None:
return p**term.exp
def product(*args, **kwargs):
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
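# Hedged usage sketch, assuming symbolic bounds reach _eval_product as above
# (k, n are sympy Symbols):
#
#     product(2, (k, 1, 10))   # -> 1024, since the term is free of k
#     product(k, (k, 1, n))    # -> RisingFactorial(1, n), i.e. n!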
| bsd-3-clause |
travisfcollins/gnuradio | gr-wxgui/python/wxgui/waterfall_window.py | 47 | 10668 | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 100
AVG_ALPHA_MIN_EXP, AVG_ALPHA_MAX_EXP = -3, 0
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'waterfall_rate', 30)
DEFAULT_COLOR_MODE = gr.prefs().get_string('wxgui', 'waterfall_color', 'rgb1')
DEFAULT_WIN_SIZE = (600, 300)
DIV_LEVELS = (1, 2, 5, 10, 20)
MIN_DYNAMIC_RANGE, MAX_DYNAMIC_RANGE = 10, 200
DYNAMIC_RANGE_STEP = 10.
COLOR_MODES = (
('RGB1', 'rgb1'),
('RGB2', 'rgb2'),
('RGB3', 'rgb3'),
('Gray', 'gray'),
)
##################################################
# Waterfall window control panel
##################################################
class control_panel(wx.Panel):
"""
    A control panel with wx widgets to control the plotter and fft block chain.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = wx.BoxSizer(wx.VERTICAL)
control_box.AddStretchSpacer()
options_box = forms.static_box_sizer(
parent=self, sizer=control_box, label='Options',
bold=True, orient=wx.VERTICAL,
)
#average
forms.check_box(
sizer=options_box, parent=self, label='Average',
ps=parent, key=AVERAGE_KEY,
)
avg_alpha_text = forms.static_text(
sizer=options_box, parent=self, label='Avg Alpha',
converter=forms.float_converter(lambda x: '%.4f'%x),
ps=parent, key=AVG_ALPHA_KEY, width=50,
)
avg_alpha_slider = forms.log_slider(
sizer=options_box, parent=self,
min_exp=AVG_ALPHA_MIN_EXP,
max_exp=AVG_ALPHA_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=AVG_ALPHA_KEY,
)
for widget in (avg_alpha_text, avg_alpha_slider):
parent.subscribe(AVERAGE_KEY, widget.Enable)
widget.Enable(parent[AVERAGE_KEY])
#begin axes box
control_box.AddStretchSpacer()
axes_box = forms.static_box_sizer(
parent=self, sizer=control_box, label='Axes Options',
bold=True, orient=wx.VERTICAL,
)
#num lines buttons
forms.incr_decr_buttons(
parent=self, sizer=axes_box, label='Time Scale',
on_incr=self._on_incr_time_scale, on_decr=self._on_decr_time_scale,
)
        #dynamic range buttons
forms.incr_decr_buttons(
parent=self, sizer=axes_box, label='Dyn Range',
on_incr=self._on_incr_dynamic_range, on_decr=self._on_decr_dynamic_range,
)
#ref lvl buttons
forms.incr_decr_buttons(
parent=self, sizer=axes_box, label='Ref Level',
on_incr=self._on_incr_ref_level, on_decr=self._on_decr_ref_level,
)
#color mode
forms.drop_down(
parent=self, sizer=axes_box, width=100,
ps=parent, key=COLOR_MODE_KEY, label='Color',
choices=map(lambda x: x[1], COLOR_MODES),
labels=map(lambda x: x[0], COLOR_MODES),
)
#autoscale
forms.single_button(
parent=self, sizer=axes_box, label='Autoscale',
callback=self.parent.autoscale,
)
#clear
control_box.AddStretchSpacer()
forms.single_button(
parent=self, sizer=control_box, label='Clear',
callback=self._on_clear_button,
)
#run/stop
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Event handlers
##################################################
def _on_clear_button(self, event):
self.parent[NUM_LINES_KEY] = self.parent[NUM_LINES_KEY]
def _on_incr_dynamic_range(self, event):
self.parent[DYNAMIC_RANGE_KEY] = min(MAX_DYNAMIC_RANGE, common.get_clean_incr(self.parent[DYNAMIC_RANGE_KEY]))
def _on_decr_dynamic_range(self, event):
self.parent[DYNAMIC_RANGE_KEY] = max(MIN_DYNAMIC_RANGE, common.get_clean_decr(self.parent[DYNAMIC_RANGE_KEY]))
def _on_incr_ref_level(self, event):
self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] + self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP
def _on_decr_ref_level(self, event):
self.parent[REF_LEVEL_KEY] = self.parent[REF_LEVEL_KEY] - self.parent[DYNAMIC_RANGE_KEY]/DYNAMIC_RANGE_STEP
def _on_incr_time_scale(self, event):
old_rate = self.parent[FRAME_RATE_KEY]
self.parent[FRAME_RATE_KEY] *= 0.75
if self.parent[FRAME_RATE_KEY] < 1.0:
self.parent[FRAME_RATE_KEY] = 1.0
if self.parent[FRAME_RATE_KEY] == old_rate:
self.parent[DECIMATION_KEY] += 1
def _on_decr_time_scale(self, event):
old_rate = self.parent[FRAME_RATE_KEY]
self.parent[FRAME_RATE_KEY] *= 1.25
if self.parent[FRAME_RATE_KEY] == old_rate:
self.parent[DECIMATION_KEY] -= 1
##################################################
# Waterfall window with plotter and control panel
##################################################
class waterfall_window(wx.Panel, pubsub.pubsub):
def __init__(
self,
parent,
controller,
size,
title,
real,
fft_size,
num_lines,
decimation_key,
baseband_freq,
sample_rate_key,
frame_rate_key,
dynamic_range,
ref_level,
average_key,
avg_alpha_key,
msg_key,
):
pubsub.pubsub.__init__(self)
#setup
self.samples = list()
self.real = real
self.fft_size = fft_size
#proxy the keys
self.proxy(MSG_KEY, controller, msg_key)
self.proxy(DECIMATION_KEY, controller, decimation_key)
self.proxy(FRAME_RATE_KEY, controller, frame_rate_key)
self.proxy(AVERAGE_KEY, controller, average_key)
self.proxy(AVG_ALPHA_KEY, controller, avg_alpha_key)
self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.waterfall_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(False)
#plotter listeners
self.subscribe(COLOR_MODE_KEY, self.plotter.set_color_mode)
self.subscribe(NUM_LINES_KEY, self.plotter.set_num_lines)
#initialize values
self[DYNAMIC_RANGE_KEY] = dynamic_range
self[NUM_LINES_KEY] = num_lines
self[Y_DIVS_KEY] = 8
self[X_DIVS_KEY] = 8 #approximate
self[REF_LEVEL_KEY] = ref_level
self[BASEBAND_FREQ_KEY] = baseband_freq
self[COLOR_MODE_KEY] = COLOR_MODES[0][1]
self[COLOR_MODE_KEY] = DEFAULT_COLOR_MODE
self[RUNNING_KEY] = True
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
for key in (
DECIMATION_KEY, SAMPLE_RATE_KEY, FRAME_RATE_KEY,
BASEBAND_FREQ_KEY, X_DIVS_KEY, Y_DIVS_KEY, NUM_LINES_KEY,
): self.subscribe(key, self.update_grid)
#initial update
self.update_grid()
def set_callback(self,callb):
self.plotter.set_callback(callb)
def autoscale(self, *args):
"""
Autoscale the waterfall plot to the last frame.
Set the dynamic range and reference level.
Does not affect the current data in the waterfall.
"""
if not len(self.samples): return
min_level, max_level = common.get_min_max_fft(self.samples)
#set the range and level
self[DYNAMIC_RANGE_KEY] = common.get_clean_num(max_level - min_level)
self[REF_LEVEL_KEY] = DYNAMIC_RANGE_STEP*round(.5+max_level/DYNAMIC_RANGE_STEP)
def handle_msg(self, msg):
"""
Handle the message from the fft sink message queue.
If complex, reorder the fft samples so the negative bins come first.
        If real, take only the positive bins.
Send the data to the plotter.
Args:
msg: the fft array as a character array
"""
if not self[RUNNING_KEY]: return
#convert to floating point numbers
self.samples = samples = numpy.fromstring(msg, numpy.float32)[:self.fft_size] #only take first frame
num_samps = len(samples)
#reorder fft
if self.real: samples = samples[:(num_samps+1)/2]
else: samples = numpy.concatenate((samples[num_samps/2+1:], samples[:(num_samps+1)/2]))
#plot the fft
self.plotter.set_samples(
samples=samples,
minimum=self[REF_LEVEL_KEY] - self[DYNAMIC_RANGE_KEY],
maximum=self[REF_LEVEL_KEY],
)
#update the plotter
self.plotter.update()
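    # Note: for the complex case above, the concatenate() reordering plays the
    # same role as numpy.fft.fftshift -- it moves the negative-frequency bins
    # in front of the positive ones so the x axis runs low-to-high frequency.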
def update_grid(self, *args):
"""
Update the plotter grid.
This update method is dependent on the variables below.
Determine the x and y axis grid parameters.
The x axis depends on sample rate, baseband freq, and x divs.
The y axis depends on y per div, y divs, and ref level.
"""
#grid parameters
sample_rate = self[SAMPLE_RATE_KEY]
frame_rate = self[FRAME_RATE_KEY]
if frame_rate < 1.0 :
frame_rate = 1.0
baseband_freq = self[BASEBAND_FREQ_KEY]
num_lines = self[NUM_LINES_KEY]
y_divs = self[Y_DIVS_KEY]
x_divs = self[X_DIVS_KEY]
#determine best fitting x_per_div
if self.real: x_width = sample_rate/2.0
else: x_width = sample_rate/1.0
x_per_div = common.get_clean_num(x_width/x_divs)
#update the x grid
if self.real:
self.plotter.set_x_grid(
baseband_freq,
baseband_freq + sample_rate/2.0,
x_per_div, True,
)
else:
self.plotter.set_x_grid(
baseband_freq - sample_rate/2.0,
baseband_freq + sample_rate/2.0,
x_per_div, True,
)
#update x units
self.plotter.set_x_label('Frequency', 'Hz')
#update y grid
duration = float(num_lines)/frame_rate
y_per_div = common.get_clean_num(duration/y_divs)
self.plotter.set_y_grid(0, duration, y_per_div, True)
#update y units
self.plotter.set_y_label('Time', 's')
#update plotter
self.plotter.update()
| gpl-3.0 |
TestInABox/openstackinabox | openstackinabox/tests/utils/test_directory.py | 1 | 1798 | import os
import os.path
import ddt
import mock
from openstackinabox.tests.base import TestBase
from openstackinabox.utils import directory
@ddt.ddt
class TestTempDirectory(TestBase):
def setUp(self):
super(TestTempDirectory, self).setUp()
def tearDown(self):
super(TestTempDirectory, self).tearDown()
def test_initialization(self):
temp_dir = directory.TemporaryDirectory()
self.assertIsInstance(temp_dir.name, str)
self.assertIn(directory.TemporaryDirectory.__name__, repr(temp_dir))
def test_cleanup(self):
temp_dir = directory.TemporaryDirectory()
self.assertTrue(os.path.exists(temp_dir.name))
file_names = [temp_dir.name]
for x in range(10):
filename = '{0}/file_{1}'.format(
temp_dir.name,
x
)
with open(filename, 'w') as data_output:
data_output.write(str(os.urandom(8192)))
file_names.append(filename)
temp_dir.cleanup()
for name in file_names:
self.assertFalse(os.path.exists(name))
def test_del_cleanup_error(self):
with mock.patch(
'shutil.rmtree'
) as mock_rmtree:
mock_rmtree.side_effect = OSError('mock error')
temp_dir = directory.TemporaryDirectory()
temp_dir.cleanup()
def test_context(self):
temp_dir_name = None
temp_dir = directory.TemporaryDirectory()
with temp_dir as context:
self.assertEqual(id(temp_dir), id(context))
temp_dir_name = context.name
self.assertTrue(os.path.exists(temp_dir_name))
try:
self.assertFalse(os.path.exists(temp_dir_name))
except OSError:
pass
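# For context: the class under test mirrors Python 3's
# tempfile.TemporaryDirectory pattern -- the directory is created on
# construction and removed recursively by cleanup() -- which is the behaviour
# the assertions above probe.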
| apache-2.0 |
Jumpscale/ays9 | tests/test_services/test_directory_structure/actions.py | 1 | 3901 | def init_actions_(service, args):
"""
    this needs to return a mapping of actions representing the dependencies
    between actions. Look at ACTION_DEPS in this module for an example of what
    is expected
"""
# some default logic for simple actions
return {
'test': ['install']
}
def test(job):
"""
    Test that the created directory structure is correct after executing ays blueprints on a test repo
"""
import sys
import os
RESULT_OK = 'OK : %s'
RESULT_FAILED = 'FAILED : %s'
RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
model = job.service.model
model.data.result = RESULT_OK % job.service.name
failures = []
expected_actors = ['cockpittesting', 'datacenter', 'sshkey']
expected_files_per_actor = ['actions.py', 'actor.json', 'schema.capnp']
actor_missing_msg = 'Actor folder [%s] does not exist'
actor_file_missing_msg = 'File [%s] for actor [%s] is missing'
service_file_missing_msg = 'Service file [%s] is missing'
expected_services = {'datacenter!ovh_germany1':{
# Un-comment the following when enabling the _bp_related1.yaml blueprint
# 'cockpittesting!cockpitv1': {'files': ['data.json',
# 'schema.capnp',
# 'service.json']},
'files': ['data.json', 'schema.capnp', 'service.json']},
# Un-comment the following when enabling the _bp_related1.yaml blueprint
# 'datacenter!ovh_germany2': {'files': ['data.json',
# 'schema.capnp',
# 'service.json']},
'datacenter!ovh_germany3': {'cockpittesting!cockpitv2': {'files': ['data.json',
'schema.capnp',
'service.json']},
'files': ['data.json', 'schema.capnp', 'service.json']},
'sshkey!main': {'files': ['data.json', 'schema.capnp', 'service.json']}}
cwd = os.getcwd()
repos = []
repo_name = 'sample_repo1'
repo_path = j.sal.fs.joinPaths(j.dirs.CODEDIR, 'github/jumpscale/ays9/tests/%s' % repo_name)
repos.append(repo_name)
def check_service_dir(base_path, service):
for service_name, service_info in service.items():
if service_name != 'files':
path = j.sal.fs.joinPaths(base_path, service_name)
check_service_dir(path, service_info)
else:
for service_file in service['files']:
if not j.sal.fs.exists(j.sal.fs.joinPaths(base_path, service_file)):
failures.append(service_file_missing_msg % j.sal.fs.joinPaths(base_path, service_file))
try:
ays_client = j.clients.atyourservice.get().api.ays
blueprints = map(lambda item: item['name'], ays_client.listBlueprints(repo_name, query_params={'archived': False}).json())
for blueprint in blueprints:
ays_client.executeBlueprint(data={}, blueprint=blueprint, repository=repo_name)
# validate directory structure
for actor in expected_actors:
if not j.sal.fs.exists(j.sal.fs.joinPaths(repo_path, 'actors', actor)):
failures.append(actor_missing_msg % actor)
else:
for actor_file in expected_files_per_actor:
if not j.sal.fs.exists(j.sal.fs.joinPaths(repo_path, 'actors', actor, actor_file)):
failures.append(actor_file_missing_msg % (actor_file, actor))
for service_name, service_info in expected_services.items():
path = j.sal.fs.joinPaths(repo_path, 'services', service_name)
check_service_dir(path, service_info)
if failures:
model.data.result = RESULT_FAILED % '\n'.join(failures)
except:
model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
finally:
job.service.save()
j.sal.fs.changeDir(cwd)
for repo in repos:
ays_client.destroyRepository(data={}, repository=repo)
| apache-2.0 |
MechanisM/bleach | bleach/tests/test_links.py | 3 | 4636 | from nose.tools import eq_
import urllib
from bleach import Bleach, url_re
b = Bleach()
class cleach(Bleach):
def filter_url(self, url):
return u'http://bouncer/?u=%s' % urllib.quote_plus(url)
c = cleach()
def test_url_re():
def no_match(s):
match = url_re.search(s)
if match:
assert not match, 'matched %s' % s[slice(*match.span())]
yield no_match, 'just what i am looking for...it'
def test_empty():
eq_('', b.linkify(''))
def test_simple_link():
eq_('a <a href="http://example.com" rel="nofollow">http://example.com</a> link',
b.linkify('a http://example.com link'))
eq_('a <a href="https://example.com" rel="nofollow">https://example.com</a> link',
b.linkify('a https://example.com link'))
def test_mangle_link():
eq_('<a href="http://bouncer/?u=http%3A%2F%2Fexample.com" rel="nofollow">http://example.com</a>',
c.linkify('http://example.com'))
def test_email_link():
eq_('a [email protected] mailto',
b.linkify('a [email protected] mailto'))
def test_tlds():
eq_('<a href="http://example.com" rel="nofollow">example.com</a>',
b.linkify('example.com'))
eq_('<a href="http://example.co.uk" rel="nofollow">example.co.uk</a>',
b.linkify('example.co.uk'))
eq_('<a href="http://example.edu" rel="nofollow">example.edu</a>',
b.linkify('example.edu'))
eq_('example.xxx', b.linkify('example.xxx'))
eq_(' brie', b.linkify(' brie'))
eq_('<a href="http://bit.ly/fun" rel="nofollow">bit.ly/fun</a>',
b.linkify('bit.ly/fun'))
def test_escaping():
eq_('< unrelated', b.linkify('< unrelated'))
def test_nofollow_off():
eq_('<a href="http://example.com">example.com</a>',
b.linkify(u'example.com', nofollow=False))
def test_link_in_html():
eq_('<i><a href="http://yy.com" rel="nofollow">http://yy.com</a></i>',
b.linkify('<i>http://yy.com</i>'))
eq_('<em><strong><a href="http://xx.com" rel="nofollow">http://xx.com</a></strong></em>',
b.linkify('<em><strong>http://xx.com</strong></em>'))
def test_links_https():
eq_('<a href="https://yy.com" rel="nofollow">https://yy.com</a>',
b.linkify('https://yy.com'))
def test_add_rel_nofollow():
"""Verify that rel="nofollow" is added to an existing link"""
eq_('<a href="http://yy.com" rel="nofollow">http://yy.com</a>',
b.linkify('<a href="http://yy.com">http://yy.com</a>'))
def test_url_with_path():
eq_('<a href="http://example.com/path/to/file" rel="nofollow">http://example.com/path/to/file</a>',
b.linkify('http://example.com/path/to/file'))
def test_link_ftp():
eq_('<a href="ftp://ftp.mozilla.org/some/file" rel="nofollow">ftp://ftp.mozilla.org/some/file</a>',
b.linkify('ftp://ftp.mozilla.org/some/file'))
def test_link_query():
eq_('<a href="http://xx.com/?test=win" rel="nofollow">http://xx.com/?test=win</a>',
b.linkify('http://xx.com/?test=win'))
eq_('<a href="http://xx.com/?test=win" rel="nofollow">xx.com/?test=win</a>',
b.linkify('xx.com/?test=win'))
eq_('<a href="http://xx.com?test=win" rel="nofollow">xx.com?test=win</a>',
b.linkify('xx.com?test=win'))
def test_link_fragment():
eq_('<a href="http://xx.com/path#frag" rel="nofollow">http://xx.com/path#frag</a>',
b.linkify('http://xx.com/path#frag'))
def test_link_entities():
eq_('<a href="http://xx.com/?a=1&b=2" rel="nofollow">http://xx.com/?a=1&b=2</a>',
b.linkify('http://xx.com/?a=1&b=2'))
def test_escaped_html():
"""If I pass in escaped HTML, it should probably come out escaped."""
s = '<em>strong</em>'
eq_(s, b.linkify(s))
# Not supported at this time
# TODO:
# - Can this pass eventually?
#def test_link_http_complete():
# eq_('<a href="https://user:[email protected]/x/y.exe?a=b&c=d&e#f">https://user:[email protected]/x/y.exe?a=b&c=d&e#f</a>',
# b.linkify('https://user:[email protected]/x/y.exe?a=b&c=d&e#f'))
def test_non_url():
"""document.vulnerable should absolutely not be linkified."""
s = 'document.vulnerable'
eq_(s, b.linkify(s))
def test_javascript_url():
"""javascript: urls should never be linkified."""
s = 'javascript:document.vulnerable'
eq_(s, b.linkify(s))
def test_unsafe_url():
"""Any unsafe char ({}[]<>, etc.) in the path should end URL scanning."""
eq_('All your{"<a href="http://xx.yy.com/grover.png" '
'rel="nofollow">xx.yy.com/grover.png</a>"}base are',
b.linkify('All your{"xx.yy.com/grover.png"}base are'))
| bsd-3-clause |
cjh1/VTK | Examples/GUI/Python/ImagePlaneWidget.py | 14 | 10228 | #!/usr/bin/env python
# This code is a direct translation of the Tcl code in
# ImagePlaneWidget.tcl. It could easily be written using a nice class
# to do the job but the present code should definitely make for an
# illustrative example.
# This example demonstrates how to use the vtkImagePlaneWidget
# to probe a 3D image dataset with three orthogonal planes.
# Buttons are provided to:
# a) capture the render window display to a tiff file
# b) x,y,z buttons reset the widget to orthonormal
# positioning, set the horizontal slider to move the
# associated widget along its normal, and set the
# camera to face the widget
# c) right clicking on x,y,z buttons pops up a menu to set
# the associated widget's reslice interpolation mode
import vtk
import Tkinter
from vtk.tk.vtkTkRenderWindowInteractor import \
vtkTkRenderWindowInteractor
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Start by loading some data.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64, 64)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
v16.SetImageRange(1, 93)
v16.SetDataSpacing(3.2, 3.2, 1.5)
v16.Update()
xMin, xMax, yMin, yMax, zMin, zMax = v16.GetExecutive().GetWholeExtent(v16.GetOutputInformation(0))
spacing = v16.GetOutput().GetSpacing()
sx, sy, sz = spacing
origin = v16.GetOutput().GetOrigin()
ox, oy, oz = origin
# An outline is shown for context.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# The shared picker enables us to use 3 planes at one time
# and gets the picking order right
picker = vtk.vtkCellPicker()
picker.SetTolerance(0.005)
# The 3 image plane widgets are used to probe the dataset.
planeWidgetX = vtk.vtkImagePlaneWidget()
planeWidgetX.DisplayTextOn()
planeWidgetX.SetInputConnection(v16.GetOutputPort())
planeWidgetX.SetPlaneOrientationToXAxes()
planeWidgetX.SetSliceIndex(32)
planeWidgetX.SetPicker(picker)
planeWidgetX.SetKeyPressActivationValue("x")
prop1 = planeWidgetX.GetPlaneProperty()
prop1.SetColor(1, 0, 0)
planeWidgetY = vtk.vtkImagePlaneWidget()
planeWidgetY.DisplayTextOn()
planeWidgetY.SetInputConnection(v16.GetOutputPort())
planeWidgetY.SetPlaneOrientationToYAxes()
planeWidgetY.SetSliceIndex(32)
planeWidgetY.SetPicker(picker)
planeWidgetY.SetKeyPressActivationValue("y")
prop2 = planeWidgetY.GetPlaneProperty()
prop2.SetColor(1, 1, 0)
planeWidgetY.SetLookupTable(planeWidgetX.GetLookupTable())
# for the z-slice, turn off texture interpolation:
# interpolation is now nearest neighbour, to demonstrate
# cross-hair cursor snapping to pixel centers
planeWidgetZ = vtk.vtkImagePlaneWidget()
planeWidgetZ.DisplayTextOn()
planeWidgetZ.SetInputConnection(v16.GetOutputPort())
planeWidgetZ.SetPlaneOrientationToZAxes()
planeWidgetZ.SetSliceIndex(46)
planeWidgetZ.SetPicker(picker)
planeWidgetZ.SetKeyPressActivationValue("z")
prop3 = planeWidgetZ.GetPlaneProperty()
prop3.SetColor(0, 0, 1)
planeWidgetZ.SetLookupTable(planeWidgetX.GetLookupTable())
# Create the RenderWindow and Renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# Add the outline actor to the renderer, set the background color and size
ren.AddActor(outlineActor)
renWin.SetSize(600, 600)
ren.SetBackground(0.1, 0.1, 0.2)
current_widget = planeWidgetZ
mode_widget = planeWidgetZ
# Create the GUI
# We first create the supporting functions (callbacks) for the GUI
#
# Align the camera so that it faces the desired widget
def AlignCamera():
#global ox, oy, oz, sx, sy, sz, xMax, xMin, yMax, yMin, zMax, \
# zMin, slice_number
#global current_widget
cx = ox+(0.5*(xMax-xMin))*sx
cy = oy+(0.5*(yMax-yMin))*sy
    cz = oz+(0.5*(zMax-zMin))*sz
vx, vy, vz = 0, 0, 0
nx, ny, nz = 0, 0, 0
iaxis = current_widget.GetPlaneOrientation()
if iaxis == 0:
vz = -1
nx = ox + xMax*sx
cx = ox + slice_number*sx
elif iaxis == 1:
vz = -1
ny = oy+yMax*sy
cy = oy+slice_number*sy
else:
vy = 1
nz = oz+zMax*sz
cz = oz+slice_number*sz
px = cx+nx*2
py = cy+ny*2
pz = cz+nz*3
camera = ren.GetActiveCamera()
camera.SetViewUp(vx, vy, vz)
camera.SetFocalPoint(cx, cy, cz)
camera.SetPosition(px, py, pz)
camera.OrthogonalizeViewUp()
ren.ResetCameraClippingRange()
renWin.Render()
# Capture the display and place in a tiff
def CaptureImage():
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkTIFFWriter()
w2i.SetInput(renWin)
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
writer.SetFileName("image.tif")
renWin.Render()
writer.Write()
# Align the widget back into orthonormal position,
# set the slider to reflect the widget's position,
# call AlignCamera to set the camera facing the widget
def AlignXaxis():
global xMax, xMin, current_widget, slice_number
po = planeWidgetX.GetPlaneOrientation()
if po == 3:
planeWidgetX.SetPlaneOrientationToXAxes()
slice_number = (xMax-xMin)/2
planeWidgetX.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetX.GetSliceIndex()
current_widget = planeWidgetX
slice.config(from_=xMin, to=xMax)
slice.set(slice_number)
AlignCamera()
def AlignYaxis():
global yMin, yMax, current_widget, slice_number
po = planeWidgetY.GetPlaneOrientation()
if po == 3:
planeWidgetY.SetPlaneOrientationToYAxes()
slice_number = (yMax-yMin)/2
planeWidgetY.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetY.GetSliceIndex()
current_widget = planeWidgetY
slice.config(from_=yMin, to=yMax)
slice.set(slice_number)
AlignCamera()
def AlignZaxis():
    global zMin, zMax, current_widget, slice_number
po = planeWidgetZ.GetPlaneOrientation()
if po == 3:
planeWidgetZ.SetPlaneOrientationToZAxes()
slice_number = (zMax-zMin)/2
planeWidgetZ.SetSliceIndex(slice_number)
else:
slice_number = planeWidgetZ.GetSliceIndex()
current_widget = planeWidgetZ
slice.config(from_=zMin, to=zMax)
slice.set(slice_number)
AlignCamera()
# Set the widget's reslice interpolation mode
# to the corresponding popup menu choice
def SetInterpolation():
global mode_widget, mode
if mode.get() == 0:
mode_widget.TextureInterpolateOff()
else:
mode_widget.TextureInterpolateOn()
mode_widget.SetResliceInterpolate(mode.get())
renWin.Render()
# Share the popup menu among buttons, keeping track of associated
# widget's interpolation mode
def buttonEvent(event, arg=None):
global mode, mode_widget, popm
if arg == 0:
mode_widget = planeWidgetX
elif arg == 1:
mode_widget = planeWidgetY
elif arg == 2:
mode_widget = planeWidgetZ
else:
return
mode.set(mode_widget.GetResliceInterpolate())
popm.entryconfigure(arg, variable=mode)
popm.post(event.x + event.x_root, event.y + event.y_root)
def SetSlice(sl):
global current_widget
current_widget.SetSliceIndex(int(sl))
ren.ResetCameraClippingRange()
renWin.Render()
###
# Now actually create the GUI
root = Tkinter.Tk()
root.withdraw()
top = Tkinter.Toplevel(root)
# Define a quit method that exits cleanly.
def quit(obj=root):
obj.quit()
# Popup menu
popm = Tkinter.Menu(top, tearoff=0)
mode = Tkinter.IntVar()
mode.set(1)
popm.add_radiobutton(label="nearest", variable=mode, value=0,
command=SetInterpolation)
popm.add_radiobutton(label="linear", variable=mode, value=1,
command=SetInterpolation)
popm.add_radiobutton(label="cubic", variable=mode, value=2,
command=SetInterpolation)
display_frame = Tkinter.Frame(top)
display_frame.pack(side="top", anchor="n", fill="both", expand="false")
# Buttons
ctrl_buttons = Tkinter.Frame(top)
ctrl_buttons.pack(side="top", anchor="n", fill="both", expand="false")
quit_button = Tkinter.Button(ctrl_buttons, text="Quit", command=quit)
capture_button = Tkinter.Button(ctrl_buttons, text="Tif",
command=CaptureImage)
x_button = Tkinter.Button(ctrl_buttons, text="x", command=AlignXaxis)
y_button = Tkinter.Button(ctrl_buttons, text="y", command=AlignYaxis)
z_button = Tkinter.Button(ctrl_buttons, text="z", command=AlignZaxis)
x_button.bind("<Button-3>", lambda e: buttonEvent(e, 0))
y_button.bind("<Button-3>", lambda e: buttonEvent(e, 1))
z_button.bind("<Button-3>", lambda e: buttonEvent(e, 2))
for i in (quit_button, capture_button, x_button, y_button, z_button):
i.pack(side="left", expand="true", fill="both")
# Create the render widget
renderer_frame = Tkinter.Frame(display_frame)
renderer_frame.pack(padx=3, pady=3,side="left", anchor="n",
fill="both", expand="false")
render_widget = vtkTkRenderWindowInteractor(renderer_frame,
rw=renWin, width=600,
height=600)
for i in (render_widget, display_frame):
i.pack(side="top", anchor="n",fill="both", expand="false")
# Add a slice scale to browse the current slice stack
slice_number = Tkinter.IntVar()
slice_number.set(current_widget.GetSliceIndex())
slice = Tkinter.Scale(top, from_=zMin, to=zMax, orient="horizontal",
command=SetSlice,variable=slice_number,
label="Slice")
slice.pack(fill="x", expand="false")
# Done with the GUI.
###
# Set the interactor for the widgets
iact = render_widget.GetRenderWindow().GetInteractor()
planeWidgetX.SetInteractor(iact)
planeWidgetX.On()
planeWidgetY.SetInteractor(iact)
planeWidgetY.On()
planeWidgetZ.SetInteractor(iact)
planeWidgetZ.On()
# Create an initial interesting view
cam1 = ren.GetActiveCamera()
cam1.Elevation(110)
cam1.SetViewUp(0, 0, -1)
cam1.Azimuth(45)
ren.ResetCameraClippingRange()
# Render it
render_widget.Render()
iact.Initialize()
renWin.Render()
iact.Start()
# Start Tkinter event loop
root.mainloop()
| bsd-3-clause |
jordiclariana/ansible | lib/ansible/modules/cloud/misc/virt_pool.py | 48 | 22292 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: virt_pool
author: "Maciej Delmanowski (@drybjed)"
version_added: "2.0"
short_description: Manage libvirt storage pools
description:
- Manage I(libvirt) storage pools.
options:
name:
required: false
aliases: [ "pool" ]
description:
      - name of the storage pool being managed. Note that the pool must have
        been previously defined with XML.
state:
required: false
choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
description:
- specify which state you want a storage pool to be in.
If 'active', pool will be started.
If 'present', ensure that pool is present but do not change its
state; if it's missing, you need to specify xml argument.
If 'inactive', pool will be stopped.
If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.
If 'deleted', pool contents will be deleted and then pool undefined.
command:
required: false
choices: [ "define", "build", "create", "start", "stop", "destroy",
"delete", "undefine", "get_xml", "list_pools", "facts",
"info", "status" ]
description:
- in addition to state management, various non-idempotent commands are available.
See examples.
autostart:
required: false
choices: ["yes", "no"]
description:
- Specify if a given storage pool should be started automatically on system boot.
uri:
required: false
default: "qemu:///system"
description:
- I(libvirt) connection uri.
xml:
required: false
description:
- XML document used with the define command.
mode:
required: false
choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
description:
- Pass additional parameters to 'build' or 'delete' commands.
requirements:
- "python >= 2.6"
- "python-libvirt"
- "python-lxml"
'''
EXAMPLES = '''
# Define a new storage pool
- virt_pool:
command: define
name: vms
xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
# Build a storage pool if it does not exist
- virt_pool:
command: build
name: vms
# Start a storage pool
- virt_pool:
command: create
name: vms
# List available pools
- virt_pool:
command: list_pools
# Get XML data of a specified pool
- virt_pool:
command: get_xml
name: vms
# Stop a storage pool
- virt_pool:
command: destroy
name: vms
# Delete a storage pool (destroys contents)
- virt_pool:
command: delete
name: vms
# Undefine a storage pool
- virt_pool:
command: undefine
name: vms
# Gather facts about storage pools
# Facts will be available as 'ansible_libvirt_pools'
- virt_pool:
command: facts
# Gather information about pools managed by 'libvirt' remotely using uri
- virt_pool:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: storage_pools
# Ensure that a pool is active (needs to be defined and built first)
- virt_pool:
state: active
name: vms
# Ensure that a pool is inactive
- virt_pool:
state: inactive
name: vms
# Ensure that a given pool will be started at boot
- virt_pool:
autostart: yes
name: vms
# Disable autostart for a given pool
- virt_pool:
autostart: no
name: vms
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
try:
import libvirt
except ImportError:
HAS_VIRT = False
else:
HAS_VIRT = True
try:
from lxml import etree
except ImportError:
HAS_XML = False
else:
HAS_XML = True
from ansible.module_utils.basic import AnsibleModule
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
'undefine', 'destroy', 'get_xml', 'define', 'refresh']
HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0 : "inactive",
1 : "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0 : "no",
1 : "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0 : "no",
1 : "yes"
}
ENTRY_STATE_INFO_MAP = {
0 : "inactive",
1 : "building",
2 : "running",
3 : "degraded",
4 : "inaccessible"
}
ENTRY_BUILD_FLAGS_MAP = {
"new" : 0,
"repair" : 1,
"resize" : 2,
"no_overwrite" : 4,
"overwrite" : 8
}
ENTRY_DELETE_FLAGS_MAP = {
"normal" : 0,
"zeroed" : 1
}
ALL_MODES = []
ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
class EntryNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_entry(self, entryid):
# entryid = -1 returns a list of everything
results = []
# Get active entries
for name in self.conn.listStoragePools():
entry = self.conn.storagePoolLookupByName(name)
results.append(entry)
# Get inactive entries
for name in self.conn.listDefinedStoragePools():
entry = self.conn.storagePoolLookupByName(name)
results.append(entry)
if entryid == -1:
return results
for entry in results:
if entry.name() == entryid:
return entry
raise EntryNotFound("storage pool %s not found" % entryid)
def create(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).create()
else:
try:
state = self.find_entry(entryid).isActive()
except:
return self.module.exit_json(changed=True)
if not state:
return self.module.exit_json(changed=True)
def destroy(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).destroy()
else:
if self.find_entry(entryid).isActive():
return self.module.exit_json(changed=True)
def undefine(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).undefine()
else:
if not self.find_entry(entryid):
return self.module.exit_json(changed=True)
def get_status2(self, entry):
state = entry.isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
else:
try:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
except:
return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
def get_xml(self, entryid):
return self.find_entry(entryid).XMLDesc(0)
def get_info(self, entryid):
return self.find_entry(entryid).info()
def get_volume_count(self, entryid):
return self.find_entry(entryid).numOfVolumes()
def get_volume_names(self, entryid):
return self.find_entry(entryid).listVolumes()
def get_devices(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
if xml.xpath('/pool/source/device'):
result = []
for device in xml.xpath('/pool/source/device'):
result.append(device.get('path'))
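        # if no <device> elements matched above, `result` is unbound and the
        # bare `return result` below raises NameError, which the except clause
        # surfaces as ValueError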
try:
return result
except:
raise ValueError('No devices specified')
def get_format(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/pool/source/format')[0].get('type')
except:
raise ValueError('Format not specified')
return result
def get_host(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/pool/source/host')[0].get('name')
except:
raise ValueError('Host not specified')
return result
def get_source_path(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/pool/source/dir')[0].get('path')
except:
raise ValueError('Source path not specified')
return result
def get_path(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
return xml.xpath('/pool/target/path')[0].text
def get_type(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
return xml.get('type')
def build(self, entryid, flags):
if not self.module.check_mode:
return self.find_entry(entryid).build(flags)
else:
try:
state = self.find_entry(entryid)
except:
return self.module.exit_json(changed=True)
if not state:
return self.module.exit_json(changed=True)
def delete(self, entryid, flags):
if not self.module.check_mode:
return self.find_entry(entryid).delete(flags)
else:
try:
state = self.find_entry(entryid)
except:
return self.module.exit_json(changed=True)
if state:
return self.module.exit_json(changed=True)
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).autostart()
else:
try:
return self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
def set_autostart(self, entryid, val):
if not self.module.check_mode:
return self.find_entry(entryid).setAutostart(val)
else:
try:
state = self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
if bool(state) != val:
return self.module.exit_json(changed=True)
def refresh(self, entryid):
return self.find_entry(entryid).refresh()
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
def define_from_xml(self, entryid, xml):
if not self.module.check_mode:
return self.conn.storagePoolDefineXML(xml)
else:
try:
self.find_entry(entryid)
except:
return self.module.exit_json(changed=True)
class VirtStoragePool(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
self.conn = LibvirtConnection(self.uri, self.module)
def get_pool(self, entryid):
return self.conn.find_entry(entryid)
def list_pools(self, state=None):
results = []
for entry in self.conn.find_entry(-1):
if state:
if state == self.conn.get_status2(entry):
results.append(entry.name())
else:
results.append(entry.name())
return results
def state(self):
results = []
for entry in self.list_pools():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry,state_blurb))
return results
def autostart(self, entryid):
return self.conn.set_autostart(entryid, True)
def get_autostart(self, entryid):
return self.conn.get_autostart2(entryid)
def set_autostart(self, entryid, state):
return self.conn.set_autostart(entryid, state)
def create(self, entryid):
return self.conn.create(entryid)
def start(self, entryid):
return self.conn.create(entryid)
def stop(self, entryid):
return self.conn.destroy(entryid)
def destroy(self, entryid):
return self.conn.destroy(entryid)
def undefine(self, entryid):
return self.conn.undefine(entryid)
def status(self, entryid):
return self.conn.get_status(entryid)
def get_xml(self, entryid):
return self.conn.get_xml(entryid)
def define(self, entryid, xml):
return self.conn.define_from_xml(entryid, xml)
def build(self, entryid, flags):
return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
def delete(self, entryid, flags):
return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
def refresh(self, entryid):
return self.conn.refresh(entryid)
def info(self):
return self.facts(facts_mode='info')
def facts(self, facts_mode='facts'):
results = dict()
for entry in self.list_pools():
results[entry] = dict()
if self.conn.find_entry(entry):
data = self.conn.get_info(entry)
# libvirt returns maxMem, memory, and cpuTime as long()'s, which
# xmlrpclib tries to convert to regular int's during serialization.
# This throws exceptions, so convert them to strings here and
# assume the other end of the xmlrpc connection can figure things
# out or doesn't care.
results[entry] = {
"status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
"size_total" : str(data[1]),
"size_used" : str(data[2]),
"size_available" : str(data[3]),
}
results[entry]["autostart"] = self.conn.get_autostart(entry)
results[entry]["persistent"] = self.conn.get_persistent(entry)
results[entry]["state"] = self.conn.get_status(entry)
results[entry]["path"] = self.conn.get_path(entry)
results[entry]["type"] = self.conn.get_type(entry)
results[entry]["uuid"] = self.conn.get_uuid(entry)
if self.conn.find_entry(entry).isActive():
results[entry]["volume_count"] = self.conn.get_volume_count(entry)
results[entry]["volumes"] = list()
for volume in self.conn.get_volume_names(entry):
results[entry]["volumes"].append(volume)
else:
results[entry]["volume_count"] = -1
try:
results[entry]["host"] = self.conn.get_host(entry)
except ValueError:
pass
try:
results[entry]["source_path"] = self.conn.get_source_path(entry)
except ValueError:
pass
try:
results[entry]["format"] = self.conn.get_format(entry)
except ValueError:
pass
try:
devices = self.conn.get_devices(entry)
results[entry]["devices"] = devices
except ValueError:
pass
else:
results[entry]["state"] = self.conn.get_status(entry)
facts = dict()
if facts_mode == 'facts':
facts["ansible_facts"] = dict()
facts["ansible_facts"]["ansible_libvirt_pools"] = results
elif facts_mode == 'info':
facts['pools'] = results
return facts
def core(module):
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
mode = module.params.get('mode', None)
v = VirtStoragePool(uri, module)
res = {}
if state and command == 'list_pools':
res = v.list_pools(state=state)
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
if state:
if not name:
module.fail_json(msg = "state change requires a specified name")
res['changed'] = False
if state in [ 'active' ]:
            if v.status(name) != 'active':
res['changed'] = True
res['msg'] = v.start(name)
elif state in [ 'present' ]:
try:
v.get_pool(name)
except EntryNotFound:
if not xml:
module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
elif state in [ 'inactive' ]:
entries = v.list_pools()
if name in entries:
                if v.status(name) != 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
elif state in [ 'undefined', 'absent' ]:
entries = v.list_pools()
if name in entries:
                if v.status(name) != 'inactive':
v.destroy(name)
res['changed'] = True
res['msg'] = v.undefine(name)
elif state in [ 'deleted' ]:
entries = v.list_pools()
if name in entries:
                if v.status(name) != 'inactive':
v.destroy(name)
v.delete(name, mode)
res['changed'] = True
res['msg'] = v.undefine(name)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in ENTRY_COMMANDS:
if not name:
module.fail_json(msg = "%s requires 1 argument: name" % command)
if command == 'define':
if not xml:
module.fail_json(msg = "define requires xml argument")
try:
v.get_pool(name)
except EntryNotFound:
v.define(name, xml)
res = {'changed': True, 'created': name}
return VIRT_SUCCESS, res
elif command == 'build':
res = v.build(name, mode)
if not isinstance(res, dict):
res = { 'changed': True, command: res }
return VIRT_SUCCESS, res
elif command == 'delete':
res = v.delete(name, mode)
if not isinstance(res, dict):
res = { 'changed': True, command: res }
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % command)
if autostart is not None:
if not name:
module.fail_json(msg = "state change requires a specified name")
res['changed'] = False
if autostart:
if not v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, True)
else:
if v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, False)
return VIRT_SUCCESS, res
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule (
argument_spec = dict(
name = dict(aliases=['pool']),
state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
autostart = dict(type='bool'),
mode = dict(choices=ALL_MODES),
),
supports_check_mode = True
)
if not HAS_VIRT:
module.fail_json(
msg='The `libvirt` module is not importable. Check the requirements.'
)
if not HAS_XML:
module.fail_json(
msg='The `lxml` module is not importable. Check the requirements.'
)
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception as e:
module.fail_json(msg=str(e))
    if rc != 0: # something went wrong; emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
gregdek/ansible | lib/ansible/plugins/lookup/vars.py | 55 | 3004 | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: vars
author: Ansible Core
version_added: "2.5"
short_description: Lookup templated value of variables
description:
- Retrieves the value of an Ansible variable.
options:
_terms:
description: The variable names to look up.
required: True
default:
description:
- What to return if a variable is undefined.
- If no default is set, it will result in an error if any of the variables is undefined.
"""
EXAMPLES = """
- name: Show value of 'variablename'
debug: msg="{{ lookup('vars', 'variabl' + myvar)}}"
vars:
variablename: hello
myvar: ename
- name: Show default empty since I don't have 'variablnotename'
debug: msg="{{ lookup('vars', 'variabl' + myvar, default='')}}"
vars:
variablename: hello
myvar: notename
- name: Produce an error since I don't have 'variablnotename'
debug: msg="{{ lookup('vars', 'variabl' + myvar)}}"
ignore_errors: True
vars:
variablename: hello
myvar: notename
- name: find several related variables
debug: msg="{{ lookup('vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}"
- name: alternate way to find some 'prefixed vars' in loop
debug: msg="{{ lookup('vars', 'ansible_play_' + item) }}"
loop:
- hosts
- batch
- hosts_all
"""
RETURN = """
_value:
description:
- value of the variables requested.
"""
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if variables is not None:
self._templar.set_available_variables(variables)
myvars = getattr(self._templar, '_available_variables', {})
self.set_options(direct=kwargs)
default = self.get_option('default')
ret = []
for term in terms:
if not isinstance(term, string_types):
                raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
try:
try:
value = myvars[term]
except KeyError:
try:
value = myvars['hostvars'][myvars['inventory_hostname']][term]
except KeyError:
raise AnsibleUndefinedVariable('No variable found with this name: %s' % term)
ret.append(self._templar.template(value, fail_on_undefined=True))
except AnsibleUndefinedVariable:
if default is not None:
ret.append(default)
else:
raise
return ret
| gpl-3.0 |
roselleebarle04/django | django/conf/locale/el/formats.py | 446 | 1477 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd/m/Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'd/m/Y P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', '%Y-%m-%d', # '25/10/2006', '25/10/06', '2006-10-25',
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
CSC301H-Fall2013/JuakStore | site-packages/django/db/utils.py | 100 | 6204 | import os
import pkgutil
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils._os import upath
from django.utils import six
DEFAULT_DB_ALIAS = 'default'
# Define some exceptions that mirror the PEP249 interface.
# We will rethrow any backend-specific errors using these
# common wrappers
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
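# A minimal usage sketch (the table and query below are hypothetical):
# callers can catch these wrappers instead of backend-specific exception
# classes.
#
#     from django.db import utils
#     try:
#         cursor.execute("INSERT INTO poll (question) VALUES (%s)", [q])
#     except utils.IntegrityError:
#         pass  # e.g. a duplicate row, without importing psycopg2 errors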
def load_backend(backend_name):
# Look for a fully qualified database backend name
try:
return import_module('.base', backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name != 'dummy']
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
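# Hedged usage sketch: a built-in engine path returns the backend's base
# module, while an unknown name raises ImproperlyConfigured listing the
# available built-in backends.
#
#     backend = load_backend('django.db.backends.sqlite3')
#     conn = backend.DatabaseWrapper(settings_dict, 'default')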
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases):
if not databases:
self.databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
else:
self.databases = databases
self._connections = local()
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', 'UTC' if settings.USE_TZ else settings.TIME_ZONE)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
for setting in ['TEST_CHARSET', 'TEST_COLLATION', 'TEST_NAME', 'TEST_MIRROR']:
conn.setdefault(setting, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
class ConnectionRouter(object):
def __init__(self, routers):
self.routers = []
for r in routers:
if isinstance(r, six.string_types):
try:
module_name, klass_name = r.rsplit('.', 1)
module = import_module(module_name)
except ImportError as e:
raise ImproperlyConfigured('Error importing database router %s: "%s"' % (klass_name, e))
try:
router_class = getattr(module, klass_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a database router name "%s"' % (module, klass_name))
else:
router = router_class()
else:
router = r
self.routers.append(router)
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
try:
return hints['instance']._state.db or DEFAULT_DB_ALIAS
except KeyError:
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_syncdb(self, db, model):
for router in self.routers:
try:
method = router.allow_syncdb
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(db, model)
if allow is not None:
return allow
return True
| mit |
itucsdb1611/itucsdb1611 | classes/operations/project_operations.py | 1 | 4444 |
import psycopg2 as dbapi2
import datetime
from classes.model_config import dsn
class project_operations:
def __init__(self):
self.last_key = None
def add_project(self, Project):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute(
"INSERT INTO Project(Name, Description, ProjectTypeId, ProjectThesisTypeId, DepartmentId, ProjectStatusTypeId, StartDate, EndDate, MemberLimit, CreatedByPersonId, ProjectManagerId, Deleted) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, False )",
(Project.title, Project.project_description, Project.project_type, Project.project_thesis_type,
Project.department, Project.project_status_type, Project.start_date, Project.end_date,
Project.member_limit, Project.created_by, Project.manager))
connection.commit()
self.last_key = cursor.lastrowid
def delete_project(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute("""DELETE FROM Project WHERE (ObjectId=%s)""", (key,))
connection.commit()
def update_project(self, key, title, project_description, end_date, member_limit, manager, deleted):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute(
"""UPDATE Project SET Name = %s, Description = %s, EndDate = %s, MemberLimit = %s, ProjectManagerId = %s, Deleted = %s WHERE (ObjectId=%s)""",
(title, project_description, end_date, member_limit, manager, deleted, key))
connection.commit()
def get_project(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = """SELECT Project.Name, Project.Description, ProjectType.Name, Department.Name, ProjectStatusType.Name, Person.FirstName, Person.LastName, Project.ObjectId, Project.CreatedByPersonId, Project.EndDate, Project.MemberLimit FROM Project
JOIN ProjectType ON(Project.ProjectTypeId=ProjectType.ObjectId)
JOIN Department ON(Project.DepartmentId = Department.ObjectId)
JOIN ProjectStatusType ON(Project.ProjectStatusTypeId=ProjectStatusType.ObjectId)
JOIN Person ON(Project.CreatedByPersonId=Person.ObjectId)
WHERE (Project.ObjectID = %s)"""
cursor.execute(query, (key,))
project = cursor.fetchone()
connection.commit()
return project
def get_projects(self):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute("""SELECT Project.ObjectId, Project.Name, Description, Department.Name, Person.FirstName, Person.LastName
FROM Project JOIN Department ON(Project.DepartmentId = Department.ObjectId) JOIN Person ON(Person.ObjectId = Project.ProjectManagerId)""")
projects = cursor.fetchall()
connection.commit()
return projects
def get_project_member_limit(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute("""SELECT MemberLimit FROM Project WHERE (ObjectId=%s)""", (key,))
projects = cursor.fetchall()
connection.commit()
return projects
def get_the_projects_of_a_person(self, key):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
query = """SELECT Project.Name, Project.Description, ProjectType.Name, Project.ObjectId FROM Project
JOIN ProjectType ON(Project.ProjectTypeId=ProjectType.ObjectId)
JOIN Team ON(Project.ObjectId = Team.ProjectId)
WHERE (Team.MemberId = %s)"""
cursor.execute(query, (key,))
project_ids = cursor.fetchall()
connection.commit()
return project_ids
def get_last(self):
with dbapi2.connect(dsn) as connection:
cursor = connection.cursor()
cursor.execute("""SELECT ObjectId FROM Project Order By ObjectId Desc LIMIT 1""")
projectId = cursor.fetchone()
connection.commit()
return projectId
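# Hedged usage sketch (the `project` argument is assumed to carry the
# attributes referenced by add_project: title, project_description, the
# various type/department ids, dates, member_limit, created_by, manager):
#
#     ops = project_operations()
#     ops.add_project(project)
#     last_id = ops.get_last()[0]
#     row = ops.get_project(last_id)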
| gpl-3.0 |
andensinlimite/metaespacio | metaespacio/metaespacio/settings.py | 2 | 4507 | """
Django settings for metaespacio project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '57#f7u+v@yh*vwv^ox#%*wgx6c@_a*%8#)0@1f6#dt=oar4u$f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = (os.getuid() >= 1000)
PRODUCCION = False
TEMPLATE_DEBUG = DEBUG
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_extensions',
'oidc_provider',
'crispy_forms',
'common',
'registro',
'espacios',
'pages',
'taquilla',
'encuestas',
    # 'bibliotheca',  # needs updating to Django 1.8
    # 'tastypie',  # needs updating to Django 1.8
    'django.contrib.admin',  # last, due to a template override
'cuotas',
'graphos',
'contabilidad',
'adjuntos',
'caronte',
'rest',
'rest_framework',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
from django.conf import global_settings
TEMPLATE_CONTEXT_PROCESSORS = \
global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'common.context_processor.site',
)
ROOT_URLCONF = 'metaespacio.urls'
WSGI_APPLICATION = 'metaespacio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# SITE_ID = 1
TEMPLATE_LOADERS = ('django.template.loaders.app_directories.Loader', )
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
}
}
# custom settings
MEDIA_URL = "/media/"
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = "/"
CRISPY_TEMPLATE_PACK = "bootstrap3"
SITE_URL = 'http://metaespacio.org'
LOGIN_URL = '/accounts/login'
OIDC_RSA_KEY_FOLDER = BASE_DIR
DEFAULT_FROM_EMAIL = '[email protected]'
try:
from .settings_local import * # noqa
except ImportError:
pass
if DEBUG:
    # static files live in the project folder during development
STATIC_ROOT = os.path.join(BASE_DIR, '.static')
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
    # not used during development
ALLOWED_HOSTS = []
INSTALLED_APPS += ('debug_toolbar', )
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )
if PRODUCCION:
    # FIXME: review this, since we are not actually setting it here exactly either
STATIC_ROOT = '/var/www/metaespacio/static/'
MEDIA_ROOT = '/opt/metaespacio/media/'
    # used in preproduction and production
ALLOWED_HOSTS = ['*']
else:
    # errors are printed to the console
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
| agpl-3.0 |
geekboxzone/lollipop_external_chromium_org | third_party/markdown/extensions/codehilite.py | 109 | 10820 | # markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html>
Contact: [email protected]
License: BSD (see ../LICENSE.md for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments](http://pygments.org/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
import warnings
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer
from pygments.formatters import HtmlFormatter
pygments = True
except ImportError:
pygments = False
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite(object):
"""
    Determine language of source code, and pass it into the pygments highlighter.
Basic Usage:
>>> code = CodeHilite(src = 'some text')
>>> html = code.hilite()
* src: Source string or any object with a .readline attribute.
    * linenums: (Boolean) Set line numbering to 'on' (True), 'off' (False) or 'auto' (None).
Set to 'auto' by default.
* guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default).
* css_class: Set class name of wrapper div ('codehilite' by default).
Low Level Usage:
>>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr.
    >>> code.linenos = True # True or False; Turns line numbering on or off.
>>> html = code.hilite()
"""
def __init__(self, src=None, linenums=None, guess_lang=True,
css_class="codehilite", lang=None, style='default',
noclasses=False, tab_length=4):
self.src = src
self.lang = lang
self.linenums = linenums
self.guess_lang = guess_lang
self.css_class = css_class
self.style = style
self.noclasses = noclasses
self.tab_length = tab_length
def hilite(self):
"""
        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None:
self._getLang()
if pygments:
try:
lexer = get_lexer_by_name(self.lang)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src)
else:
lexer = TextLexer()
except ValueError:
lexer = TextLexer()
formatter = HtmlFormatter(linenos=self.linenums,
cssclass=self.css_class,
style=self.style,
noclasses=self.noclasses)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
classes = []
if self.lang:
classes.append('language-%s' % self.lang)
if self.linenums:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="%s"' % ' '.join(classes)
return '<pre class="%s"><code%s>%s</code></pre>\n'% \
(self.css_class, class_str, txt)
def _getLang(self):
"""
        Determines the language of a code block from its shebang line and
        whether that line should be removed or left in place. If the shebang
        line contains a path (even a single /) then it is assumed to be a real
        shebang line and left alone. However, if no path is given (i.e.:
        #!python or :::python) then it is assumed to be a mock shebang for
        language identification of a code fragment and removed from the code
        block prior to processing for code highlighting. When a mock shebang
        (i.e.: #!python) is found, line numbering is turned on. When colons
        are found in place of a shebang (i.e.: :::python), line numbering is
        left in the current state - off by default.
"""
import re
#split text into lines
lines = self.src.split("\n")
#pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons.
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError:
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.linenums is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.linenums = True
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
""" Hilight source code in code blocks. """
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.getiterator('pre')
for block in blocks:
children = block.getchildren()
if len(children) == 1 and children[0].tag == 'code':
code = CodeHilite(children[0].text,
linenums=self.config['linenums'],
guess_lang=self.config['guess_lang'],
css_class=self.config['css_class'],
style=self.config['pygments_style'],
noclasses=self.config['noclasses'],
tab_length=self.markdown.tab_length)
placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True)
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, configs):
# define default configs
self.config = {
            'linenums': [None, "Use line numbers. True=yes, False=no, None=auto"],
            'force_linenos' : [False, "Deprecated! Use 'linenums' instead. Force line numbers - Default: False"],
'guess_lang' : [True, "Automatic language detection - Default: True"],
'css_class' : ["codehilite",
"Set class name for wrapper <div> - Default: codehilite"],
'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'],
'noclasses': [False, 'Use inline styles instead of CSS classes - Default false']
}
# Override defaults with user settings
for key, value in configs:
# convert strings to booleans
if value == 'True': value = True
if value == 'False': value = False
if value == 'None': value = None
if key == 'force_linenos':
warnings.warn('The "force_linenos" config setting'
                    ' to the CodeHilite extension is deprecated.'
' Use "linenums" instead.', PendingDeprecationWarning)
if value:
# Carry 'force_linenos' over to new 'linenos'.
self.setConfig('linenums', True)
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.add("hilite", hiliter, "<inline")
md.registerExtension(self)
def makeExtension(configs={}):
return CodeHiliteExtension(configs=configs)
| bsd-3-clause |
dennisfrancis/PacketManipulator | umit/pm/core/atoms.py | 2 | 12717 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008, 2009 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module contains classes that could be useful in various parts of the
program.
"""
import sys
import copy
import Queue
import threading
import StringIO
import traceback
from HTMLParser import HTMLParser
from umit.pm.core.logger import log
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
# Ordered dict python implementation
class odict(dict):
def __init__(self, d={}):
self._keys = d.keys()
dict.__init__(self, d)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
# a peculiar sharp edge from copy.deepcopy
# we'll have our set item called without __init__
if not hasattr(self, '_keys'):
self._keys = [key,]
if key not in self._keys:
self._keys.append(key)
def clear(self):
dict.clear(self)
self._keys = []
def items(self):
items = []
for i in self._keys:
            items.append((i, self[i]))
return items
def keys(self):
return self._keys
def popitem(self):
if len(self._keys) == 0:
raise KeyError('dictionary is empty')
else:
key = self._keys[-1]
val = self[key]
del self[key]
return key, val
def setdefault(self, key, failobj = None):
dict.setdefault(self, key, failobj)
if key not in self._keys:
self._keys.append(key)
def update(self, d):
for key in d.keys():
if not self.has_key(key):
self._keys.append(key)
dict.update(self, d)
def values(self):
v = []
for i in self._keys:
v.append(self[i])
return v
def move(self, key, index):
""" Move the specified to key to *before* the specified index. """
try:
cur = self._keys.index(key)
except ValueError:
raise KeyError(key)
self._keys.insert(index, key)
# this may have shifted the position of cur, if it is after index
if cur >= index: cur = cur + 1
del self._keys[cur]
def index(self, key):
if not self.has_key(key):
raise KeyError(key)
return self._keys.index(key)
def __iter__(self):
for k in self._keys:
yield k
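# A minimal odict usage sketch (the keys and values are hypothetical):
#
#     d = odict()
#     d['b'] = 1
#     d['a'] = 2
#     d.keys()        # -> ['b', 'a']: insertion order, not sorted order
#     d.move('a', 0)  # 'a' now precedes 'b'
#     d.index('b')    # -> 1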
# Simple decorator for compatibility with python 2.4 (with statement)
def with_decorator(func):
def proxy(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
proxy.__name__ = func.__name__
proxy.__dict__ = func.__dict__
proxy.__doc__ = func.__doc__
return proxy
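# Hedged usage sketch: with_decorator serializes method calls on any object
# exposing a `lock` attribute (the Counter class below is hypothetical):
#
#     class Counter(object):
#         def __init__(self):
#             self.lock = threading.Lock()
#             self.n = 0
#         @with_decorator
#         def incr(self):
#             self.n += 1  # runs with self.lock held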
def generate_traceback():
fp = StringIO.StringIO()
traceback.print_exc(file=fp)
return fp.getvalue()
class Node(object):
"""
    A simple Node class for building binary trees.
    To create a tree, simply do tree = Node()
"""
def __init__(self, data=None, children=[]):
"""
Initialize a Node object
@param data the data for the Node or None if you are constructing
a Tree object
@param children a list of Node objects
"""
self.data = data
self.root = None
self.children = []
for child in children:
self.append_node(child)
def append_node(self, node):
"""
Append a child node
@param node a Node object
"""
assert (isinstance(node, Node))
node.root = self
self.children.append(node)
def __iter__(self):
if self.data:
yield self
for child in self.children:
for c in child:
yield c
def __repr__(self):
if self.root != None:
return "%sChild -> %s (%d)" % (" " * self.get_depth(), self.data,
len(self.children))
else:
return "Tree %s" % object.__repr__(self)
def get_depth(self):
idx = 0
root = self.root
while root:
root = root.root
idx += 1
return idx
def __len__(self):
tot = 0
for node in self.children:
tot += len(node)
if self.data:
tot += 1
return tot
def get_parent(self):
return self.root
def get_data(self):
return self.data
def get_children(self):
for node in self.children:
yield node
def is_parent(self):
return self.children != []
def __getitem__(self, x):
return self.children[x]
def find(self, value):
for i in self:
if value == i.data:
return i.get_path()
return None
def get_path(self):
path = []
find = self
root = self.root
while root:
path.append(root.index(find))
root = root.root
find = find.root
path.reverse()
return tuple(path)
def get_next_of(self, node):
try:
return self[self.index(node) + 1]
except:
return None
def index(self, node):
return self.children.index(node)
def get_from_path(self, path):
root = self
for idx in path:
root = root[idx]
return root
def sort(self):
for node in self.children:
node.sort()
self.children.sort()
def __cmp__(self, node):
if not self:
return 1
if not node:
return -1
return cmp(self.data, node.data)
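# A minimal Node/tree sketch (the data values are hypothetical):
#
#     tree = Node()                                        # data=None root
#     tree.append_node(Node('a', [Node('b'), Node('c')]))
#     tree.find('c')   # -> the path tuple (0, 1)
#     len(tree)        # -> 3 data-bearing nodes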
WorkerStop = object()
class ThreadPool(object):
MIN_THREADS = 5
MAX_THREADS = 20
IS_DAEMON = True
started = False
joined = False
workers = 0
def __init__(self, minthreads=5, maxthreads=20):
assert minthreads >= 0
assert minthreads <= maxthreads
self.queue = Queue.Queue(0)
self.min = minthreads
self.max = maxthreads
self.waiters = []
self.threads = []
self.working = []
def queue_work(self, callback, errback, func, *args, **kwargs):
if self.joined:
return
obj = (callback, errback, func, args, kwargs)
self.queue.put(obj)
if self.started:
self.resize()
def start(self):
self.joined = False
self.started = True
self.resize()
def stop(self):
self.joined = True
threads = copy.copy(self.threads)
while self.workers:
self.queue.put(WorkerStop)
self.workers -= 1
def join_threads(self):
        # check for exceptions on already-joined threads.
threads = copy.copy(self.threads)
for thread in threads:
thread.join()
def resize(self, minthreads=None, maxthreads=None):
minthreads = max(minthreads, self.MIN_THREADS)
maxthreads = max(minthreads, self.MAX_THREADS)
assert minthreads >= 0
assert minthreads <= maxthreads
self.min = minthreads
self.max = maxthreads
if not self.started:
return
while self.workers > self.max:
self.stop_worker()
while self.workers < self.min:
self.start_worker()
self.start_needed_workers()
def start_needed_workers(self):
size = self.queue.qsize() + len(self.working)
while self.workers < min(self.max, size):
self.start_worker()
def start_worker(self):
self.workers += 1
thread = threading.Thread(target=self._worker)
thread.setDaemon(self.IS_DAEMON)
self.threads.append(thread)
thread.start()
def stop_worker(self):
self.queue.put(WorkerStop)
self.workers -= 1
def _worker(self):
ct = threading.currentThread()
obj = self.queue.get()
while obj is not WorkerStop:
self.working.append(ct)
(callback, errback, func, args, kw) = obj
try:
try:
result = func(*args, **kw)
except Exception, exc:
log.error("Handling exception %s Traceback:" % exc)
log.error(generate_traceback())
if errback is not None:
errback(sys.exc_info()[1])
else:
if callback is not None:
callback(result)
except Exception, err:
log.critical("Thread exceptions ignored. Traceback:")
log.critical(generate_traceback())
self.working.remove(ct)
self.waiters.append(ct)
obj = self.queue.get()
        # A worker that receives WorkerStop as its very first item never
        # registered itself in self.waiters.
        if ct in self.waiters:
            self.waiters.remove(ct)
        self.threads.remove(ct)
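# Minimal usage sketch (nothing runs at import time; call _threadpool_demo()
# by hand): a single-worker pool executes one job and the callback collects
# the result.
def _threadpool_demo():
    results = []
    pool = ThreadPool(minthreads=1, maxthreads=1)
    pool.start()
    pool.queue_work(results.append, None, lambda a, b: a + b, 1, 2)
    pool.stop()
    pool.join_threads()
    assert results == [3]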
class Interruptable:
"""
Interruptable interface
"""
def start(self):
raise Exception("Implement me")
def terminate(self):
raise Exception("Implement me")
def isAlive(self):
raise Exception("Implement me")
class Singleton(object):
"""
A class for singleton pattern
Support also gobject if Singleton base subclass if specified first
"""
instances = {}
    def __new__(cls, *args, **kwargs):
        # gobject is optional; it is only needed when the subclass is a GObject.
        try:
            from gobject import GObject
        except ImportError:
            GObject = None
        if Singleton.instances.get(cls) is None:
            cls.__original_init__ = cls.__init__
            if GObject is not None and issubclass(cls, GObject):
                Singleton.instances[cls] = GObject.__new__(cls)
            else:
                Singleton.instances[cls] = object.__new__(cls, *args, **kwargs)
elif cls.__init__ == cls.__original_init__:
def nothing(*args, **kwargs):
pass
cls.__init__ = nothing
return Singleton.instances[cls]
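# Minimal usage sketch (nothing runs at import time; call _singleton_demo()
# by hand): repeated construction returns the cached instance and __init__
# only runs once.
def _singleton_demo():
    class Config(Singleton):
        def __init__(self):
            self.count = getattr(self, 'count', 0) + 1
    first = Config()
    assert Config() is first
    assert first.count == 1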
class HTMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_stripped_data(self):
return ''.join(self.fed)
def strip_tags(x):
s = HTMLStripper()
s.feed(x)
return s.get_stripped_data()
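# Minimal usage sketch:
#     >>> strip_tags('<b>hello</b> <i>world</i>')
#     'hello world'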
__all__ = ['strip_tags', 'Singleton', 'Interruptable', 'ThreadPool', 'Node', \
'generate_traceback', 'with_decorator', 'defaultdict', 'odict']
| gpl-2.0 |
agbell/karaka | karaka/api/apiconfig.py | 4 | 1689 | #
# Karaka Skype-XMPP Gateway: API configuration handler
# <http://www.vipadia.com/products/karaka.html>
#
# Copyright (C) 2008-2009 Vipadia Limited
# Richard Mortier <[email protected]>
# Neil Stratford <[email protected]>
#
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License version
## 2 as published by the Free Software Foundation.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License version 2 for more details.
## You should have received a copy of the GNU General Public License
## version 2 along with this program; if not, write to the Free
## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
import ConfigParser
FILENAME = '/etc/karaka-api.conf'
class APIConfig:
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read(FILENAME)
self.sql_server = self.get("mysql", "server")
self.sql_database = self.get("mysql", "database")
self.sql_user = self.get("mysql", "user")
self.sql_password = self.get("mysql", "password")
        self.marketing_message = self.get("default", "mood")
self.complete = True
def get(self, section, option):
if self.config.has_option(section, option):
return self.config.get(section, option)
else:
print "No option " + option + " in section " + section + " in " + FILENAME
self.complete = False
return ""
| gpl-2.0 |
Multirom-mi4i/android_kernel_xiaomi_ferrari | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
        except IOError:
                return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
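# Worked example: with kallsyms == [(0x1000, "a"), (0x2000, "b")],
# get_sym(0x1800) returns ("a", 0x800) and get_sym(0x800) returns (None, 0).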
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
        drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
sbrisard/janus | examples/fftw_python_benchmark_mpi.py | 1 | 1237 | import sys
import time
import numpy as np
import janus.fft.parallel
from mpi4py import MPI
def benchmark(shape, niter):
comm = MPI.COMM_WORLD
root = 0
transform = janus.fft.parallel.create_real(shape, comm)
    # Collective call: every rank must participate even though the gathered
    # result is only meaningful on the root.
    local_sizes = comm.gather((transform.ishape[0], transform.offset0))
if comm.rank == root:
r = np.random.uniform(-1., 1., transform.shape)
else:
        r = None
rloc = np.empty(transform.ishape, dtype=np.float64)
comm.Scatterv(r, rloc, root)
cloc = np.empty(transform.oshape, dtype=np.float64)
times = []
for i in range(niter):
t1 = time.perf_counter()
transform.r2c(rloc, cloc)
t2 = time.perf_counter()
times.append(1E3 * (t2 - t1))
return np.mean(times), np.std(times)
if __name__ == '__main__':
janus.fft.parallel.init()
np.random.seed(20140121)
params = [((128, 128, 128), 15000),
((256, 256, 256), 10000),
((512, 512, 512), 1000)]
for shape, niter in params:
mean, std = benchmark(shape, niter)
if MPI.COMM_WORLD.rank == 0:
args = map(str, shape + (niter, MPI.COMM_WORLD.size, mean, std))
print(','.join(args), flush=True)
MPI.Finalize()
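# This benchmark is meant to be launched under MPI; for example (the exact
# launcher name depends on the MPI distribution):
#   mpiexec -n 4 python fftw_python_benchmark_mpi.py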
| bsd-3-clause |
snnn/tensorflow | tensorflow/contrib/data/python/kernel_tests/assert_element_shape_test.py | 8 | 8680 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
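# Usage sketch distilled from the tests below (not an exhaustive contract):
# assert_element_shape is applied through Dataset.apply, e.g.
#
#   dataset = dataset.apply(batching.assert_element_shape(expected_shapes))
#
# Shapes that are statically incompatible fail at graph construction time
# with ValueError; shapes only known at run time fail with
# InvalidArgumentError when the iterator is evaluated.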
class AssertElementShapeTest(test_base.DatasetTestBase):
def test_assert_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(expected_shapes, dataset.output_shapes)
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def test_assert_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(5).map(create_dataset)
partial_expected_shape = (
tensor_shape.TensorShape(None), # Unknown shape
tensor_shape.TensorShape((None, 4))) # Partial shape
result = dataset.apply(
batching.assert_element_shape(partial_expected_shape))
# Partial shapes are merged with actual shapes:
actual_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((3, 4)))
self.assertEqual(actual_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape(self):
def create_dataset(_):
return (array_ops.ones(2, dtype=dtypes.float32),
array_ops.zeros((3, 4), dtype=dtypes.int32))
dataset = dataset_ops.Dataset.range(3).map(create_dataset)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
with self.assertRaises(ValueError):
dataset.apply(batching.assert_element_shape(wrong_shapes))
def test_assert_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(5).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
expected_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 4)))
result = dataset.apply(batching.assert_element_shape(expected_shapes))
self.assertEqual(expected_shapes, result.output_shapes)
iterator = result.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(5):
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def test_assert_wrong_partial_element_shape_on_unknown_shape_dataset(self):
def create_unknown_shape_dataset(x):
return script_ops.py_func(
lambda _: ( # pylint: disable=g-long-lambda
np.ones(2, dtype=np.float32),
np.zeros((3, 4), dtype=np.int32)),
[x],
[dtypes.float32, dtypes.int32])
dataset = dataset_ops.Dataset.range(3).map(create_unknown_shape_dataset)
unknown_shapes = (tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None))
self.assertEqual(unknown_shapes, dataset.output_shapes)
wrong_shapes = (tensor_shape.TensorShape(2),
tensor_shape.TensorShape((None, 10)))
iterator = (
dataset.apply(batching.assert_element_shape(wrong_shapes))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 |
alxgu/ansible | lib/ansible/modules/network/f5/bigip_device_group_member.py | 38 | 8383 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: 2.5
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
type: str
required: True
device_group:
description:
- The device group that you want to add the member to.
type: str
required: True
state:
description:
- When C(present), ensures that the device group member exists.
- When C(absent), ensures the device group member is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
loop: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
class Parameters(AnsibleF5Parameters):
api_map = {}
api_attributes = []
returnables = []
updatables = []
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
change = getattr(self, returnable)
if isinstance(change, dict):
result.update(change)
else:
result[returnable] = change
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = Parameters(params=self.module.params)
self.have = None
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return False
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to remove the member from the device group.")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group,
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
        if resp.status == 404 or ('code' in response and response['code'] == 404):
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/cm/device-group/{2}/devices/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.device_group,
self.want.name
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
device_group=dict(required=True),
state=dict(
default='present',
choices=['absent', 'present']
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
Lyrositor/moul-scripts | Python/system/random.py | 10 | 32008 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
import hashlib as _hashlib
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
Optionally, implement a getrandbits() method so that randrange() can cover
arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
        If a is neither None nor an int or long, hash(a) is used instead.
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( long(x) % (2**32) for x in internalstate )
except ValueError, e:
raise TypeError, e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
"""Change the internal state to one that is likely far away
from the current state. This method will not be in Py3.x,
so it is better to simply reseed.
"""
# The super.jumpahead() method uses shuffling to change state,
# so it needs a large and "interesting" n to work with. Here,
# we use hashing to create a large n for the shuffle.
s = repr(n) + repr(self.getstate())
n = int(_hashlib.new('sha512', s).hexdigest(), 16)
super(Random, self).jumpahead(n)
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth:
return istart + istep*self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
_Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
"""Return a random int in the range [0,n)
Handles the case where n has more bits than returned
by a single call to the underlying generator.
"""
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
return r
if n >= _maxwidth:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large")
return int(self.random() * n)
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
def shuffle(self, x, random=None, int=int):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
"""
if random is None:
random = self.random
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use xrange as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(xrange(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
n = len(population)
if not 0 <= k <= n:
raise ValueError, "sample larger than population"
random = self.random
_int = int
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize or hasattr(population, "keys"):
# An n-length list is smaller than a k-length set, or this is a
# mapping type so the other algorithm wouldn't work.
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
selected = set()
selected_add = selected.add
for i in xrange(k):
j = _int(random() * n)
while j in selected:
j = _int(random() * n)
selected_add(j)
result[i] = population[j]
except (TypeError, KeyError): # handle (at least) sets
if isinstance(population, list):
raise
return self.sample(tuple(population), k)
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
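        # Worked note: this is inverse-transform sampling. For u <= c the
        # inverse CDF gives low + (high - low)*sqrt(u*c); the u > c branch
        # reflects u, c, and the limits so the same expression covers the
        # upper tail.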
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
random = self.random
u = random()
while u <= 1e-7:
u = random()
return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while 1:
u1 = random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = random()
if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
break
u3 = random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
VERSION = 1 # used by getstate/setstate
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
        If a is neither None nor an int or long, hash(a) is used instead.
If a is an int or long, a is used directly. Distinct values between
0 and 27814431486575L inclusive are guaranteed to yield distinct
internal states (this guarantee is specific to the default
Wichmann-Hill generator).
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
if not isinstance(a, (int, long)):
a = hash(a)
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
self.gauss_next = None
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
        # Wichmann-Hill random number generator.
#
# Wichmann, B. A. & Hill, I. D. (1982)
# Algorithm AS 183:
# An efficient and portable pseudo-random number generator
# Applied Statistics 31 (1982) 188-190
#
# see also:
# Correction to Algorithm AS 183
# Applied Statistics 33 (1984) 123
#
# McLeod, A. I. (1985)
# A remark on Algorithm AS 183
# Applied Statistics 34 (1985),198-200
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
self._seed = x, y, z
# END CRITICAL SECTION
# Note: on a platform using IEEE-754 double arithmetic, this can
# never return 0.0 (asserted by Tim; proof too long for a comment).
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, self._seed, self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 1:
version, self._seed, self.gauss_next = state
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
"""Act as if n calls to random() were made, but quickly.
n is an int, greater than or equal to 0.
Example use: If you have 2 threads and know that each will
consume no more than a million random numbers, create two Random
objects r1 and r2, then do
r2.setstate(r1.getstate())
r2.jumpahead(1000000)
Then r1 and r2 will use guaranteed-disjoint segments of the full
period.
"""
if not n >= 0:
raise ValueError("n must be >= 0")
x, y, z = self._seed
x = int(x * pow(171, n, 30269)) % 30269
y = int(y * pow(172, n, 30307)) % 30307
z = int(z * pow(170, n, 30323)) % 30323
self._seed = x, y, z
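        # Worked note: each call to random() advances a component by one
        # modular multiplication (e.g. x -> 171*x % 30269), so n calls
        # compose to multiplication by pow(171, n, 30269), which the
        # three-argument pow() computes in O(log n) steps.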
def __whseed(self, x=0, y=0, z=0):
"""Set the Wichmann-Hill seed from (x, y, z).
These must be integers in the range [0, 256).
"""
if not type(x) == type(y) == type(z) == int:
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
"""Seed from hashable object's hash code.
None or no argument seeds from current time. It is not guaranteed
that objects with distinct hash codes lead to distinct internal
states.
This is obsolete, provided for compatibility with the seed routine
used prior to Python 2.1. Use the .seed() method instead.
"""
if a is None:
self.__whseed()
return
a = hash(a)
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
bytes = (k + 7) // 8 # bits / 8 and rounded up
x = long(_hexlify(_urandom(bytes)), 16)
return x >> (bytes * 8 - k) # trim excess bits
def _stub(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
seed = jumpahead = _stub
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print n, 'times', func.__name__
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
| gpl-3.0 |
pllim/astropy | astropy/io/fits/column.py | 3 | 98069 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import re
import sys
import warnings
import weakref
import numbers
from functools import reduce
from collections import OrderedDict
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .card import Card, CARD_LENGTH
from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
NotifierMixin)
from .verify import VerifyError, VerifyWarning
from astropy.utils import lazyproperty, isiterable, indent
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Column', 'ColDefs', 'Delayed']
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS['b1'] = 'L'
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS['u2'] = 'I'
NUMPY2FITS['u4'] = 'J'
NUMPY2FITS['u8'] = 'K'
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS['f2'] = 'E'
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {'E': 'D', 'C': 'M'}
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),
'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)|')
TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \
re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|')
TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \
TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \
re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)'
r'(?:\.{0,1}(?P<precision>[0-9]+))?))|')
TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \
TDISP_RE_DICT['D'] = \
re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.'
r'(?P<precision>[0-9]+))+)'
r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')
TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \
re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)')
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: Integer
#    Zero padding and space padding can't be chosen ahead of time without
#    knowing the value being formatted, so the precision is used for zero
#    padding and the width is ignored. The same applies to B, O, and Z.
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering format; the exponent is a multiple of three)
# ES: Float (scientific; same as EN but with a non-zero leading digit)
# E: Float, exponential notation
#    The exponent-width restriction can't be applied without knowing the
#    value beforehand, so only width and precision are used; the same goes
#    for the D, G, EN, and ES formats.
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
'I': '{{:{width}d}}',
'B': '{{:{width}b}}',
'O': '{{:{width}o}}',
'Z': '{{:{width}x}}',
'F': '{{:{width}.{precision}f}}',
'G': '{{:{width}.{precision}g}}'
}
TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'
TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \
TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',
'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS')
KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',
'disp', 'start', 'dim', 'coord_type', 'coord_unit',
'coord_ref_point', 'coord_ref_value', 'coord_inc',
'time_ref_pos')
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
r'(?P<option>[!-~]*)', re.I)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
r'(?:(?P<formatf>[FED])'
r'(?:(?P<widthf>[0-9]+)\.'
r'(?P<precision>[0-9]+))?)')
TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*')
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = '---'
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ('P', 'Q'):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == 'P':
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ''
else:
repeat = str(self.repeat)
return f'{repeat}{self.format}{self.option}'
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
effectively any integer that will fit in a FITS columns. Whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = \
_parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == 'L':
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ('E', 'F', 'D'):
return f'{self.format}{self.width}.{self.precision}'
return f'{self.format}{self.width}'
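# For illustration (a sketch of the canonical ASCII forms): the width, and
# for 'E'/'F'/'D' the precision, are always written out explicitly:
#
#     _AsciiColumnFormat('I10').canonical    # 'I10'
#     _AsciiColumnFormat('F8.3').canonical   # 'F8.3'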
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + 'u1')
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f'{self.repeat}X'
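# For illustration: an X (bit) column packs its bits into whole bytes, so
# (a sketch):
#
#     _FormatX(10)          # the str '(2,)u1'--10 bits need 2 bytes
#     _FormatX(10).tform    # '10X'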
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])'
r'(?:\((?P<max>\d*)\))?')
_format_code = 'P'
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = '2i4'
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group('dtype') not in FITS2NUMPY:
raise VerifyError(f'Invalid column format: {format}')
repeat = m.group('repeat')
array_dtype = m.group('dtype')
max = m.group('max')
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = '' if self.repeat is None else self.repeat
max = '' if self.max is None else self.max
return f'{repeat}{self._format_code}{self.format}({max})'
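# For illustration (a sketch; assumes FITS2NUMPY maps 'J' to 'i4'):
#
#     p = _FormatP.from_tform('PJ(100)')
#     str(p)     # '2i4'--the two-element (length, offset) descriptor format
#     p.tform    # 'PJ(100)'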
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = 'Q'
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = '2i8'
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify('column_attribute_changed', obj, self._attr[1:], old_value,
value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(self, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None, dim=None,
array=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None, coord_inc=None,
time_ref_pos=None):
"""
Construct a `Column` by specifying attributes. All attributes
except ``format`` can be optional; see :ref:`astropy:column_creation`
and :ref:`astropy:creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
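
        Examples
        --------
        A minimal, illustrative construction (a binary table column of
        32-bit integers)::

            >>> import numpy as np
            >>> from astropy.io.fits import Column
            >>> c = Column(name='counts', format='J',
            ...            array=np.array([312, 334, 308, 317]))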
"""
if format is None:
raise ValueError('Must specify format to construct Column.')
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {'ascii': ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ['The following keyword arguments to Column were invalid:']
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError('\n'.join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs['recformat']
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; i.e.
        # input arrays may be plain lists or tuples rather than ndarrays.
        # This does not include object arrays, because there is no guarantee
        # that the elements of an object array are consistent.
if not isinstance(array,
(np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError('Data is inconsistent with the '
'format `{}`.'.format(format))
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
def __repr__(self):
text = ''
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + ' = ' + repr(value) + '; '
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
        # This way each column's .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector,
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if 'array' in self.__dict__:
return self.__dict__['array']
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if (hasattr(base, '_coldefs') and
isinstance(base._coldefs, ColDefs)):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if 'array' in self.__dict__:
del self.__dict__['array']
return
if getattr(base, 'base', None) is not None:
base = base.base
else:
break
self.__dict__['array'] = array
@array.deleter
def array(self):
try:
del self.__dict__['array']
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute('TTYPE')
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
        # names are *allowed*, but are discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
'It is strongly recommended that column names contain only '
'upper and lower-case ASCII letters, digits, or underscores '
'for maximum compatibility with other software '
'(got {!r}).'.format(name), VerifyWarning)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if (not isinstance(name, str)
or len(str(Card('TTYPE', name))) != CARD_LENGTH):
raise AssertionError(
'Column name must be a string able to fit in a single '
'FITS card--typically this means a maximum of 68 '
'characters, though it may be fewer if the string '
'contains special characters like quotes.')
@ColumnAttribute('TCTYP')
def coord_type(col, coord_type):
if coord_type is None:
return
if (not isinstance(coord_type, str)
or len(coord_type) > 8):
raise AssertionError(
                'Coordinate/axis type must be a string of at most 8 '
'characters.')
@ColumnAttribute('TCUNI')
def coord_unit(col, coord_unit):
if (coord_unit is not None
and not isinstance(coord_unit, str)):
raise AssertionError(
'Coordinate/axis unit must be a string.')
@ColumnAttribute('TCRPX')
def coord_ref_point(col, coord_ref_point):
if (coord_ref_point is not None
and not isinstance(coord_ref_point, numbers.Real)):
raise AssertionError(
'Pixel coordinate of the reference point must be '
'real floating type.')
@ColumnAttribute('TCRVL')
def coord_ref_value(col, coord_ref_value):
if (coord_ref_value is not None
and not isinstance(coord_ref_value, numbers.Real)):
raise AssertionError(
'Coordinate value at reference point must be real '
'floating type.')
@ColumnAttribute('TCDLT')
def coord_inc(col, coord_inc):
if (coord_inc is not None
and not isinstance(coord_inc, numbers.Real)):
raise AssertionError(
'Coordinate increment must be real floating type.')
@ColumnAttribute('TRPOS')
def time_ref_pos(col, time_ref_pos):
if (time_ref_pos is not None
and not isinstance(time_ref_pos, str)):
raise AssertionError(
'Time reference position must be a string.')
format = ColumnAttribute('TFORM')
unit = ColumnAttribute('TUNIT')
null = ColumnAttribute('TNULL')
bscale = ColumnAttribute('TSCAL')
bzero = ColumnAttribute('TZERO')
disp = ColumnAttribute('TDISP')
start = ColumnAttribute('TBCOL')
dim = ColumnAttribute('TDIM')
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format='I') # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f'Illegal format `{format}`.')
return format, recformat
@classmethod
def _verify_keywords(cls, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None,
dim=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None,
coord_inc=None, time_ref_pos=None):
"""
Given the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f'Column format option (TFORMn) failed verification: {err!s} '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
except AttributeError as err:
msg = (
f'Column format option (TFORMn) must be a string with a valid '
f'FITS table format (got {format!s}: {err!s}). '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [('name', name), ('unit', unit), ('bscale', bscale),
('bzero', bzero)]:
if v is not None and v != '':
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != '':
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null))
else:
tnull_formats = ('B', 'I', 'J', 'K')
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
'Column null option (TNULLn) must be an integer for '
'binary table columns (got {!r}). The invalid value '
'will be ignored for the purpose of formatting '
'the data in this column.'.format(null))
elif not (format.format in tnull_formats or
(format.format in ('P', 'Q') and
format.p_format in tnull_formats)):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
'Column null option (TNULLn) is invalid for binary '
'table columns of type {!r} (got {!r}). The invalid '
'value will be ignored for the purpose of formatting '
'the data in this column.'.format(format, null))
if msg is None:
valid['null'] = null
else:
invalid['null'] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != '':
msg = None
if not isinstance(disp, str):
msg = (
f'Column disp option (TDISPn) must be a string (got '
f'{disp!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
elif (isinstance(format, _AsciiColumnFormat) and
disp[0].upper() == 'L'):
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column.")
if msg is None:
try:
_parse_tdisp_format(disp)
valid['disp'] = disp
except VerifyError as err:
msg = (
f'Column disp option (TDISPn) failed verification: '
f'{err!s} The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
invalid['disp'] = (disp, msg)
else:
invalid['disp'] = (disp, msg)
# Validate the start option
if start is not None and start != '':
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
'Column start option (TBCOLn) is not allowed for binary '
'table columns (got {!r}). The invalid keyword will be '
'ignored for the purpose of formatting the data in this '
'column.'.format(start))
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
'Column start option (TBCOLn) must be a positive integer '
'(got {!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.'.format(start))
if msg is None:
valid['start'] = start
else:
invalid['start'] = (start, msg)
# Process TDIMn options
        # ASCII table columns can't have a TDIMn keyword associated with them;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != '':
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
'Column dim option (TDIMn) is not allowed for ASCII table '
'columns (got {!r}). The invalid keyword will be ignored '
'for the purpose of formatting this column.'.format(dim))
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column.")
if dims_tuple:
if reduce(operator.mul, dims_tuple) > format.repeat:
                    msg = (
                        "The repeat count of the column format {!r} for "
                        "column {!r} is less than the number of elements "
                        "implied by the TDIMn argument {!r}. The invalid "
                        "TDIMn value will be ignored for the purpose of "
                        "formatting this column.".format(format, name, dim))
if msg is None:
valid['dim'] = dims_tuple
else:
invalid['dim'] = (dim, msg)
if coord_type is not None and coord_type != '':
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type))
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type))
if msg is None:
valid['coord_type'] = coord_type
else:
invalid['coord_type'] = (coord_type, msg)
if coord_unit is not None and coord_unit != '':
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit))
if msg is None:
valid['coord_unit'] = coord_unit
else:
invalid['coord_unit'] = (coord_unit, msg)
for k, v in [('coord_ref_point', coord_ref_point),
('coord_ref_value', coord_ref_value),
('coord_inc', coord_inc)]:
if v is not None and v != '':
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got {!r}). "
"The invalid value will be ignored for the purpose of formatting "
"the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v))
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != '':
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos))
if msg is None:
valid['time_ref_pos'] = time_ref_pos
else:
invalid['time_ref_pos'] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
        where ambiguous), create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string, which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format,
_AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
                'Columns cannot have both a start (TBCOLn) and dim '
                '(TDIMn) option, since the former only applies to '
'ASCII tables, and the latter is only valid for binary '
'tables.')
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
            # For whatever reason our guess was wrong (for example, if we got
            # just 'F'--that's not a valid binary format, but it is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (_AsciiColumnFormat
if guess_format is _ColumnFormat
else _ColumnFormat)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims:
shape = dims[:-1] if 'A' in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if 'P' in format or 'Q' in format:
return array
elif 'A' in format:
if array.dtype.char in 'SU':
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif 'L' in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype('bool'):
return np.where(array == np.False_, ord('F'), ord('T'))
else:
return np.where(array == 0, ord('F'), ord('T'))
elif 'X' in format:
return _convert_array(array, np.dtype('uint8'))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31),
8: np.uint64(2**63)}
if (array.dtype.kind == 'u' and
array.dtype.itemsize in bzeros and
self.bscale in (1, None, '') and
self.bzero == bzeros[array.dtype.itemsize]):
                    # If the array is uint, has scale == 1.0, and the
                    # bzero is the appropriate value for a pseudo-unsigned
                    # integer of the input dtype, then go ahead and treat
                    # the column as pseudo-unsigned
numpy_format = numpy_format.replace('i', 'u')
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = '\x00'
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if (hasattr(input, '_columns_type') and
issubclass(input._columns_type, ColDefs)):
klass = input._columns_type
elif (hasattr(input, '_col_format_cls') and
issubclass(input._col_format_cls, _AsciiColumnFormat)):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .hdu.table import _TableBaseHDU
from .fitsrec import FITS_rec
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and
input._coldefs):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError('Input to ColDefs must be a table HDU, a list '
'of Columns, or a record/field array.')
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f'Element {idx} in the ColDefs input is not a Column.')
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
# (typically just 1D)
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or 'A' in format):
if 'A' in format:
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (array.dtype[idx].base.itemsize,) + dim
dim = '(' + ','.join(str(d) for d in dim) + ')'
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == 'u':
if 'I' in format:
bzero = np.uint16(2**15)
elif 'J' in format:
bzero = np.uint32(2**31)
elif 'K' in format:
bzero = np.uint64(2**63)
c = Column(name=cname, format=format,
array=array.view(np.ndarray)[cname], bzero=bzero,
dim=dim)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr['TFIELDS']
# go through header keywords to pick out column definition keywords
        # and build a definition dictionary for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group('label')
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group('num'))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == 'format':
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f'Invalid keyword for column {idx + 1}: {val[1]}',
VerifyWarning)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs['recformat']
if 'dim' in valid_kwargs:
valid_kwargs['dim'] = kwargs['dim']
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]['array'] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener for changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if (new_column.disp is not None and
new_column.disp.upper().startswith('L')):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
        Implements, for example, ``self.units``, ``self.formats``, etc.
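
        Illustrative equivalence: ``self.units`` behaves like
        ``[col.unit if col.unit is not None else '' for col in self.columns]``.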
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == 's':
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else '')
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim:
if format_.format == 'A':
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({'names': self.names,
'formats': formats,
'offsets': offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = 'ColDefs('
if hasattr(self, 'columns') and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += '\n '
rep += '\n '.join([repr(c) for c in self.columns])
rep += '\n'
rep += ')'
return rep
def __add__(self, other, option='left'):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError('Wrong type of input.')
if option == 'left':
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, 'right')
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
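    # For illustration, the arithmetic above works like this (a sketch;
    # neither operation modifies the original ColDefs in place):
    #
    #     coldefs + column            # new ColDefs with the column appended
    #     coldefs - 'colname'         # new ColDefs without the named column
    #     coldefs - ['col1', 'col2']  # multiple removals at once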
def _update_column_attribute_changed(self, column, attr, old_value,
new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == 'name':
del self.names
elif attr == 'format':
del self.formats
self._notify('column_attribute_changed', column, idx, attr, old_value,
new_value)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify('column_added', self, column)
return self
def del_col(self, col_name):
"""
        Delete (the definition of) one `Column`.

        Parameters
        ----------
        col_name : str or int
            The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify('column_removed', self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f'New name {new_name} already exists.')
else:
self.change_attrib(col_name, 'name', new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, 'unit', new_unit)
def info(self, attrib='all', output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ['all', '']:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(',')
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == 's':
                    lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write("'{}' is not an attribute of the column "
"definitions.\n".format(attr))
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + 's')
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = ' '
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = 'S' + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
        # Each entry in widths is the width of the corresponding field
        # *including* any space between fields; this is so that we can map
        # the fields to string records in a Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ['a' + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype='a'):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == 'a':
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(
f'Inconsistent input data array: {input}')
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a,
dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Make sure the new item has a consistent data type, to avoid
        misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == 'a':
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
self.max = max(self.max, len(value))
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
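
    For example (illustrative)::

        _get_index(['abc', 'ABC'], 'abc')   # 0 (exact match)
        _get_index(['abc', 'ABC'], 'ABC')   # 1 (exact match)
        _get_index(['XYZ'], 'xyz')          # 0 (unique, case-insensitive)
        _get_index(['abc', 'ABC'], 'Abc')   # raises KeyError (ambiguous)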
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
        input ``uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8')
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
    Wrap the X format column Boolean array into a ``uint8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
        output ``uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
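# For illustration, _wrapx and _unwrapx round-trip (a sketch; assumes the
# final left-shift of unused bits above applies only to the last byte):
#
#     bits = np.array([[1, 0, 1, 1, 0, 0, 1, 0, 1, 1]], dtype=bool)
#     packed = np.zeros((1, 2), dtype='uint8')
#     _wrapx(bits, packed, 10)      # packed == [[178, 192]]
#     out = np.zeros((1, 10), dtype=bool)
#     _unwrapx(packed, out, 10)     # out == bits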
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
    the data. It returns the output "data" array of data type ``format.dtype``.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == 'a':
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == 'a':
rowval = ' ' * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == 'a':
data_output[idx] = chararray.array(encode_ascii(rowval),
itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
descr_output[idx, 0] = len(data_output[idx])
descr_output[idx, 1] = _offset
_offset += len(data_output[idx]) * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
        # TODO: Maybe catch this error and use a default type (bytes, maybe?)
        # for unrecognized column types. As long as we can determine the
        # correct byte width somehow...
raise VerifyError(f'Format {tform!r} is not recognized.')
if repeat == '':
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
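# For illustration (a sketch; assumes TFORMAT_RE yields an empty string for
# a missing option):
#
#     _parse_tformat('100E')   # (100, 'E', '')
#     _parse_tformat('J')      # (1, 'J', '')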
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f'Format {tform!r} is not recognized.')
# Be flexible on case
format = match.group('format')
if format is None:
# Floating point format
format = match.group('formatf').upper()
width = match.group('widthf')
precision = match.group('precision')
if width is None or precision is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group('width')
if width is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = ('Format {!r} is not valid--field width and decimal precision '
'must be integers.')
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError("Format {!r} not valid--field width must be a "
"positive integeter.".format(tform))
if precision >= width:
raise VerifyError("Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width))
return format, width, precision
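# For illustration (a sketch of the parsing above):
#
#     _parse_ascii_tformat('I10')    # ('I', 10, 0)
#     _parse_ascii_tformat('F8.3')   # ('F', 8, 3)
#     _parse_ascii_tformat('F8.8')   # VerifyError: precision >= width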
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the value ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group('dims')
return tuple(int(d.strip()) for d in dims.split(','))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == 'a' and f2[0] == 'a':
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == 'A':
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == 'A' and option != '':
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ''
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == 'X':
output_format = _FormatX(repeat)
elif dtype == 'P':
output_format = _FormatP.from_tform(format)
elif dtype == 'Q':
output_format = _FormatQ.from_tform(format)
elif dtype == 'F':
output_format = 'f8'
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
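# Illustrative behavior (a sketch; assumes the module-level FITS2NUMPY table
# maps 'A' -> 'a' and 'J' -> 'i4' per the FITS standard):
#     >>> _convert_fits2record('10A')
#     'a10'
#     >>> _convert_fits2record('2J')
#     '2i4'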
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == 'U':
        # Unicode dtype--itemsize is 4 times the actual ASCII character
        # length, which is what matters for FITS column formats
# Use dtype.base--dtype may be a multi-dimensional dtype
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype='i8').prod()
if nel > 1:
repeat = nel
if kind == 'a':
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + 'A'
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ''
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
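# Illustrative behavior (a sketch; assumes NUMPY2FITS maps 'f8' -> 'D'):
#     >>> _convert_record2fits('f8')
#     'D'
#     >>> _convert_record2fits('a10')
#     '10A'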
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
    a dtype (e.g. 'float32') into one of the two-character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ('U', 'S'):
recformat = kind = 'a'
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
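# Illustrative behavior (a sketch):
#     >>> _dtype_to_recformat('float32')
#     ('f4', 'f', dtype('float32'))
#     >>> _dtype_to_recformat('S10')[:2]    # any string dtype collapses to 'a'
#     ('a', 'a')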
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == 'a':
return 'A' + str(itemsize)
elif NUMPY2FITS.get(recformat) == 'L':
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return 'A1'
elif kind == 'i':
            # For the width, use the maximum required to represent integers
            # of that byte size plus 1 for the sign, but no less than the
            # default width (to keep consistent with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS['I'][0])
return 'I' + str(width)
elif kind == 'f':
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = 'D'
else:
format = 'E'
width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
        # For integers, if the width <= 4 we can safely use 16-bit ints for
        # all values; if the width >= 10 we may need to accommodate 64-bit
        # ints (for the non-standard J format code, just always force 64-bit).
if format == 'I':
if width <= 4:
recformat = 'i2'
elif width > 9:
recformat = 'i8'
elif format == 'A':
recformat += str(width)
return recformat
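# Illustrative behavior (a sketch; the exact width assumes the
# ASCII_DEFAULT_WIDTHS table defined earlier):
#     >>> _convert_ascii_format('i4', reverse=True)   # 1 + len(str(2**32)) = 11
#     'I11'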
def _parse_tdisp_format(tdisp):
"""
    Parse the ``TDISPn`` keywords for ASCII and binary tables into a
    ``(format, width, precision, exponential)`` tuple (the TDISP values
    for ASCII and binary are identical except for 'Lw', which is only
    present in BINTABLE extensions).
Parameters
----------
tdisp: str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = tdisp[0] if tdisp[0] != 'E' or (
len(tdisp) > 1 and tdisp[1] not in 'NS') else tdisp[:2]
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f'Format {tdisp} is not recognized.')
match = tdisp_re.match(tdisp.strip())
if not match or match.group('formatc') is None:
raise VerifyError(f'Format {tdisp} is not recognized.')
formatc = match.group('formatc')
width = match.group('width')
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'):
precision = match.group('precision')
if precision is None:
precision = 1
if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'):
exponential = match.group('exponential')
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
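# Illustrative behavior (a sketch; width and precision come back as strings
# from the regex groups, and exponential stays None for non-E/D/G formats):
#     >>> _parse_tdisp_format('F6.2')
#     ('F', '6', '2', None)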
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
    See the format_type definitions above the TDISP_FMT_DICT. If the codes
    are changed to take advantage of the exponential specification, it will
    need to be added as another input parameter.
Parameters
----------
tdisp: str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f'Format {format_type} is not recognized.')
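# Illustrative behavior (a sketch; assumes TDISP_FMT_DICT maps 'F' to a
# '{{:{width}.{precision}f}}'-style template):
#     >>> _fortran_to_python_format('F6.2')
#     '{:6.2f}'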
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn a Python format string into a TDISP FITS-compliant format string. Not
    all formats convert; those that do not trigger a warning and return None.
Parameters
----------
format_string: str
        Python format string (e.g. '{:8.3f}' or '%10d') to translate into a
        TDISPn keyword value.
logical_dtype: bool
        True if this format type should be a logical type, 'L'. Needs special
        handling.
Returns
-------
    tdisp_string: str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z',
'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E',
'E': 'E'}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == '{' and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip('}')
elif format_string[0] == '%':
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = '', ''
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':
ftype = fmt_to_tdisp['a']
width = fmt_str[1:]
elif fmt_str[-1] == 's' and fmt_str != 's':
ftype = fmt_to_tdisp['a']
width = fmt_str[:-1].lstrip('0')
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if '.' in fmt_str:
width, precision = fmt_str.split('.')
sep = '.'
if width == "":
ascii_key = ftype if ftype != 'G' else 'F'
width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] -
ASCII_DEFAULT_WIDTHS[ascii_key][1]))
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn('Format {} cannot be mapped to the accepted '
'TDISPn keyword values. Format will not be '
'moved into TDISPn keyword.'.format(format_string),
AstropyUserWarning)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = 'L'
return ftype + width + sep + precision
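# Illustrative behavior (a sketch, derivable from the branches above):
#     >>> python_to_tdisp('{:8.3f}')
#     'F8.3'
#     >>> python_to_tdisp('%10d')
#     'I10'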
| bsd-3-clause |
LennonChin/Django-Practices | MxShop/apps/goods/views.py | 1 | 2837 | from .serializers import GoodsSerializer, CategorySerializer
from rest_framework import mixins
from rest_framework import generics, viewsets, filters
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from rest_framework.authentication import TokenAuthentication
from rest_framework.throttling import AnonRateThrottle, UserRateThrottle
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from .models import Goods, GoodsCategory, Banner
from .filter import GoodsFilter
from .serializers import BannerSerializer, IndexCategorySerializer
# Create your views here.
class GoodsPagination(PageNumberPagination):
page_size = 12
page_size_query_param = 'page_size'
page_query_param = 'page'
max_page_size = 100
class GoodsListView(generics.ListAPIView):
"""
    Goods list page.
"""
queryset = Goods.objects.all()
serializer_class = GoodsSerializer
pagination_class = GoodsPagination
class GoodsListViewset(CacheResponseMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
"""
    list:
        Goods list page.
"""
throttle_classes = (AnonRateThrottle, UserRateThrottle)
queryset = Goods.objects.all()
serializer_class = GoodsSerializer
pagination_class = GoodsPagination
    # When using JWT, this Token-based authentication must not be enabled
# authentication_classes = (TokenAuthentication, )
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
filter_class = GoodsFilter
search_fields = ('name', 'goods_brief', 'goods_desc')
ordering_fields = ('sold_num', 'shop_price')
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
instance.click_num += 1
instance.save()
serializer = self.get_serializer(instance)
return Response(serializer.data)
class CategoryViewset(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
"""
list:
        Goods category list data.
    retrieve:
        Get goods category details.
"""
queryset = GoodsCategory.objects.filter(category_type=1)
serializer_class = CategorySerializer
class BannerViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
"""
    Get the list of carousel banners.
"""
queryset = Banner.objects.all().order_by("-index")
serializer_class = BannerSerializer
class IndexCategoryViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
"""
    Homepage goods category data.
"""
queryset = GoodsCategory.objects.filter(is_tab=True, name__in=["生鲜食品", "酒水饮料"])
serializer_class = IndexCategorySerializer
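# Illustrative wiring (a sketch, not from this repo; the route names are
# assumptions, and depending on the DRF version the kwarg is base_name or
# basename):
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register(r'goods', GoodsListViewset, base_name='goods')
#     router.register(r'categories', CategoryViewset, base_name='categories')
#     router.register(r'banners', BannerViewset, base_name='banners')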
| apache-2.0 |
timgraham/django-cms | cms/utils/admin.py | 4 | 4743 | # -*- coding: utf-8 -*-
import json
from collections import defaultdict
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.template.loader import get_template
from django.utils.encoding import smart_str
from cms.models import EmptyTitle, Title
from cms.utils import get_language_from_request, get_language_list, get_cms_setting
from cms.utils import page_permissions
NOT_FOUND_RESPONSE = "NotFound"
def jsonify_request(response):
""" Turn any response in a 200 response to let jQuery code handle it nicely.
Response contains a json object with the following attributes:
* status: original response status code
* content: original response content
"""
content = {'status': response.status_code, 'content': smart_str(response.content, response.charset)}
return HttpResponse(json.dumps(content), content_type="application/json")
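# Illustrative behavior (a sketch): a 404 response with body b"missing" is
# wrapped as a 200 JSON response whose payload is
#     {"status": 404, "content": "missing"}
# so client-side jQuery success handlers can inspect the original status.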
def render_admin_rows(request, pages, site, filtered=False, language=None):
"""
    Used for rendering the page tree; inserts into the context everything
    we need for a single item.
"""
user = request.user
site = Site.objects.get_current()
lang = get_language_from_request(request)
permissions_on = get_cms_setting('PERMISSION')
user_can_add = page_permissions.user_can_add_subpage
user_can_move = page_permissions.user_can_move_page
user_can_change = page_permissions.user_can_change_page
user_can_change_advanced_settings = page_permissions.user_can_change_page_advanced_settings
user_can_publish = page_permissions.user_can_publish_page
template = get_template('admin/cms/page/tree/menu.html')
if not language:
language = get_language_from_request(request)
filtered = filtered or request.GET.get('q')
if filtered:
# When the tree is filtered, it's displayed as a flat structure
# therefore there's no concept of open nodes.
open_nodes = []
else:
open_nodes = list(map(int, request.GET.getlist('openNodes[]')))
languages = get_language_list(site.pk)
page_ids = []
for page in pages:
page_ids.append(page.pk)
if page.publisher_public_id:
page_ids.append(page.publisher_public_id)
cms_title_cache = defaultdict(dict)
cms_page_titles = Title.objects.filter(
page__in=page_ids,
language__in=languages
)
for cms_title in cms_page_titles.iterator():
cms_title_cache[cms_title.page_id][cms_title.language] = cms_title
def render_page_row(page):
page_cache = cms_title_cache[page.pk]
for language in languages:
page_cache.setdefault(language, EmptyTitle(language=language))
page.title_cache = cms_title_cache[page.pk]
if page.publisher_public_id:
publisher_cache = cms_title_cache[page.publisher_public_id]
for language in languages:
publisher_cache.setdefault(language, EmptyTitle(language=language))
page.publisher_public.title_cache = publisher_cache
has_move_page_permission = user_can_move(user, page)
metadata = ""
if permissions_on and not has_move_page_permission:
# jstree metadata generator
md = [('valid_children', False), ('draggable', False)]
            # just turn it into a simple JavaScript object
            metadata = "{" + ", ".join(
                "%s: %s" % (e[0], str(e[1]) if isinstance(e[1], bool) else e[1].lower())
                for e in md) + "}"
if filtered:
children = page.children.none()
else:
children = page.get_children()
context = {
'request': request,
'page': page,
'site': site,
'lang': lang,
'filtered': filtered,
'metadata': metadata,
'page_languages': page.get_languages(),
'preview_language': lang,
'has_add_page_permission': user_can_add(user, target=page),
'has_change_permission': user_can_change(user, page),
'has_change_advanced_settings_permission': user_can_change_advanced_settings(user, page),
'has_publish_permission': user_can_publish(user, page),
'has_move_page_permission': has_move_page_permission,
'children': children,
'site_languages': languages,
'open_nodes': open_nodes,
'cms_current_site': site,
'is_popup': (IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET)
}
return template.render(context)
rendered = (render_page_row(page) for page in pages)
return ''.join(rendered)
| bsd-3-clause |
petrjasek/superdesk-ntb | server/ntb/io/feed_parsers/afp_newsml.py | 2 | 1977 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2018 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk import etree
from superdesk.io.registry import register_feed_parser
from superdesk.io.feed_parsers.afp_newsml_1_2 import AFPNewsMLOneFeedParser
from .utils import ingest_category_from_subject, filter_missing_subjects, set_default_service
class NTBAFPNewsMLParser(AFPNewsMLOneFeedParser):
NAME = 'ntbafpnewsml'
label = 'NTB AFP NewsML Parser'
def parse(self, xml, provider=None):
item = super().parse(xml, provider)
item['slugline'] = ''
category = ingest_category_from_subject(item.get('subject')) # check for sports using all ingested subjects
item['subject'] = filter_missing_subjects(item.get('subject'))
item['subject'].append(category)
urgency = item.get('urgency', None)
if urgency == 2:
item['urgency'] = 3
elif urgency == 4:
item['urgency'] = 5
set_default_service(item)
if not item.get('headline') and item.get('body_html'):
first_line = item.get('body_html').strip().split('\n')[0]
parsed_headline = etree.parse_html(first_line, 'html')
item['headline'] = etree.to_string(parsed_headline, method="text").strip().split('\n')[0]
return item
def parse_newslines(self, item, tree):
super().parse_newslines(item, tree)
newsline_type = tree.find(
'NewsItem/NewsComponent/NewsLines/NewsLine/NewsLineType[@FormalName="AdvisoryLine"]'
)
if newsline_type is not None and newsline_type.getnext() is not None:
item['ednote'] = newsline_type.getnext().text or ''
register_feed_parser(NTBAFPNewsMLParser.NAME, NTBAFPNewsMLParser())
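# Illustrative effect of the urgency remapping in parse() above (a sketch):
# an ingested AFP item with urgency 2 is stored with urgency 3, urgency 4
# becomes 5, and all other values pass through unchanged. Note that the
# sports category is detected from the full ingested subject list *before*
# missing subjects are filtered out.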
| agpl-3.0 |
alheinecke/tensorflow-xsmm | tensorflow/contrib/distributions/python/ops/uniform.py | 11 | 6929 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where:
* `low = a`,
* `high = b`,
* `Z` is the normalizing constant, and,
* `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=True`.
"""
parameters = locals()
with ops.name_scope(name, values=[low, high]):
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
contrib_tensor_util.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _log_prob(self, x):
return math_ops.log(self._prob(x))
def _prob(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape_tensor())
return array_ops.where(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
array_ops.where(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
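# Illustrative usage (a sketch; assumes `import tensorflow as tf` and the
# TF1-style graph/session API this contrib-era module targets):
#     u = Uniform(low=0., high=10.)
#     samples = u.sample(5)             # shape (5,), values in [0, 10)
#     with tf.Session() as sess:
#         print(sess.run([samples, u.mean(), u.stddev()]))
#     # mean = (low + high) / 2 = 5.0; stddev = (high - low) / sqrt(12)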
| apache-2.0 |