from . import _ccallback_c
import ctypes
PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
ffi = None
class CData(object):
pass
def _import_cffi():
global ffi, CData
if ffi is not None:
return
try:
import cffi
ffi = cffi.FFI()
CData = ffi.CData
except ImportError:
ffi = False
class LowLevelCallable(tuple):
"""
Low-level callback function.
Parameters
----------
function : {PyCapsule, ctypes function pointer, cffi function pointer}
Low-level callback function.
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*,
if possible.
Attributes
----------
function
Callback function given
user_data
User data given
signature
Signature of the function.
Methods
-------
from_cython
Class method for constructing callables from Cython C-exported
functions.
Notes
-----
The argument ``function`` can be one of:
- PyCapsule, whose name contains the C function signature
- ctypes function pointer
- cffi function pointer
The signature of the low-level callback must match one of those expected
by the routine it is passed to.
If constructing low-level functions from a PyCapsule, the name of the
capsule must be the corresponding signature, in the format::
return_type (arg1_type, arg2_type, ...)
For example::
"void (double)"
"double (double, int *, void *)"
The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
if an explicit value for ``user_data`` was not given.
"""
# Make the class immutable
__slots__ = ()
def __new__(cls, function, user_data=None, signature=None):
# We need to hold a reference to the function & user data,
# to prevent them going out of scope
item = cls._parse_callback(function, user_data, signature)
return tuple.__new__(cls, (item, function, user_data))
def __repr__(self):
return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data)
@property
def function(self):
return tuple.__getitem__(self, 1)
@property
def user_data(self):
return tuple.__getitem__(self, 2)
@property
def signature(self):
return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))
def __getitem__(self, idx):
raise ValueError()
@classmethod
def from_cython(cls, module, name, user_data=None, signature=None):
"""
Create a low-level callback function from an exported Cython function.
Parameters
----------
module : module
Cython module where the exported function resides
name : str
Name of the exported function
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*.
"""
try:
function = module.__pyx_capi__[name]
except AttributeError:
raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute")
except KeyError:
raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name))
return cls(function, user_data, signature)
@classmethod
def _parse_callback(cls, obj, user_data=None, signature=None):
_import_cffi()
if isinstance(obj, LowLevelCallable):
func = tuple.__getitem__(obj, 0)
elif isinstance(obj, PyCFuncPtr):
func, signature = _get_ctypes_func(obj, signature)
elif isinstance(obj, CData):
func, signature = _get_cffi_func(obj, signature)
elif _ccallback_c.check_capsule(obj):
func = obj
else:
raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)")
if isinstance(user_data, ctypes.c_void_p):
context = _get_ctypes_data(user_data)
elif isinstance(user_data, CData):
context = _get_cffi_data(user_data)
elif user_data is None:
context = 0
elif _ccallback_c.check_capsule(user_data):
context = user_data
else:
raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)")
return _ccallback_c.get_raw_capsule(func, signature, context)
#
# ctypes helpers
#
def _get_ctypes_func(func, signature=None):
# Get function pointer
func_ptr = ctypes.cast(func, ctypes.c_void_p).value
# Construct function signature
if signature is None:
signature = _typename_from_ctypes(func.restype) + " ("
for j, arg in enumerate(func.argtypes):
if j == 0:
signature += _typename_from_ctypes(arg)
else:
signature += ", " + _typename_from_ctypes(arg)
signature += ")"
return func_ptr, signature
def _typename_from_ctypes(item):
if item is None:
return "void"
elif item is ctypes.c_void_p:
return "void *"
name = item.__name__
pointer_level = 0
while name.startswith("LP_"):
pointer_level += 1
name = name[3:]
if name.startswith('c_'):
name = name[2:]
if pointer_level > 0:
name += " " + "*"*pointer_level
return name
def _get_ctypes_data(data):
# Get voidp pointer
return ctypes.cast(data, ctypes.c_void_p).value
#
# CFFI helpers
#
def _get_cffi_func(func, signature=None):
# Get function pointer
func_ptr = ffi.cast('uintptr_t', func)
# Get signature
if signature is None:
signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')
return func_ptr, signature
def _get_cffi_data(data):
# Get pointer
return ffi.cast('uintptr_t', data)
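# Usage sketch (an illustrative addition): with the compiled `_ccallback_c`
# extension available, a ctypes function pointer can be wrapped directly and
# the signature string is derived from its restype/argtypes by
# _get_ctypes_func():
#
#   import ctypes
#   functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
#   func = functype(lambda x: 2.0 * x)   # toy Python-backed callback
#   LowLevelCallable(func).signature     # -> "double (double)"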
|
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class JsonErrorResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
| """
Swagger model
:param dict swaggerTypes: The key is attribute name and the value is attribute type.
:param dict attributeMap: The key is attribute name and the value is json key in definition.
"""
self.swagger_types = {
'status': 'str',
'message': 'str'
}
self.attribute_map = {
'status': 'status',
'message': 'message'
}
# Status: "ok" or "error"
self.status = None # str
# Error message
self.message = None # str
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'swaggerTypes' and p != 'attributeMap':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
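# Illustrative usage (an added sketch; nothing here comes from the generated
# code itself):
if __name__ == "__main__":
    resp = JsonErrorResponse()
    resp.status = 'error'
    resp.message = 'something went wrong'
    print(resp)  # __repr__ lists the instance attributes set above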
|
# -*- coding: utf-8 -*-
appid = 'example'
apikey = 'c5dd7e7dkjp27377l903c42c032b413b'
sender = '01000000000'  # FIXME - MUST BE CHANGED AS REAL PHONE NUMBER
receivers = ['01000000000', ]  # FIXME - MUST BE CHANGED AS REAL PHONE NUMBERS
content = u'나는 유리를 먹을 수 있어요. 그래도 아프지 않아요'  # "I can eat glass. It doesn't hurt me."
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that is configured with CMake, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.environment import setvar
from easybuild.tools.run import run_cmd
class CMakeMake(ConfigureMake):
"""Support for configuring build with CMake instead of traditional configure script"""
@staticmethod
def extra_options(extra_vars=None):
"""Define extra easyconfig parameters specific to CMakeMake."""
extra_vars = ConfigureMake.extra_options(extra_vars)
extra_vars.update({
'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
})
return extra_vars
def configure_step(self, srcdir=None, builddir=None):
"""Configure build using cmake"""
if builddir is not None:
self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")
# Set the search paths for CMake
include_paths = os.pathsep.join(self.toolchain.get_variable("CPPFLAGS", list))
library_paths = os.pathsep.join(self.toolchain.get_variable("LDFLAGS", list))
setvar("CMAKE_INCLUDE_PATH", include_paths)
setvar("CMAKE_LIBRARY_PATH", library_paths)
default_srcdir = '.'
if self.cfg.get('separate_build_dir', False):
objdir = os.path.join(self.builddir, 'easybuild_obj')
try:
os.mkdir(objdir)
os.chdir(objdir)
except OSError as err:
raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
default_srcdir = self.cfg['start_dir']
if srcdir is None:
if self.cfg.get('srcdir', None) is not None:
srcdir = self.cfg['srcdir']
else:
srcdir = default_srcdir
options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]
env_to_options = {
'CC': 'CMAKE_C_COMPILER',
'CFLAGS': 'CMAKE_C_FLAGS',
'CXX': 'CMAKE_CXX_COMPILER',
'CXXFLAGS': 'CMAKE_CXX_FLAGS',
'F90': 'CMAKE_Fortran_COMPILER',
'FFLAGS': 'CMAKE_Fortran_FLAGS',
}
for env_name, option in env_to_options.items():
value = os.getenv(env_name)
if value is not None:
options.append("-D%s='%s'" % (option, value))
if build_option('rpath'):
# instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...
# https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416
options.append('-DCMAKE_SKIP_RPATH=ON')
# show what CMake is doing by default
options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
options_string = ' '.join(options)
command = "%s cmake %s %s %s" % (self.cfg['preconfigopts'], srcdir, options_string, s | elf.cfg['configopts'])
(out, _) = run_cmd(command, log_all=True, simple=False)
return out
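# Hypothetical easyconfig sketch using this easyblock (name, version and
# options are illustrative only; `separate_build_dir` and `srcdir` come from
# extra_options() above, `configopts`/`preconfigopts` from ConfigureMake):
#
#   easyblock = 'CMakeMake'
#   name = 'example'
#   version = '1.0'
#   separate_build_dir = True
#   configopts = '-DBUILD_SHARED_LIBS=ON'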
|
# coding=utf-8
from __future__ import unicode_literals
"""
Name: MyArgparse
Author: Andy Liu
Email : [email protected]
Created: 3/26/2015
Copyright: All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
def parse_command_line():
parser = argparse.ArgumentParser(prog='PROG', description='%(prog)s can ...')
parser.add_argument('NoPre', action="store", help='help information')
parser.add_argument('-t', action="store_true", dest='boolean_switch', default=False, help='Set a switch to true')
parser.add_argument('-f', action="store_false", dest='boolean_switch', default=True, help='Set a switch to false')
parser.add_argument('-s', action="store", dest='simple_value', help="Store a simple value")
parser.add_argument('-st', action="store", dest="simple_value", type=int,
help='Store a simple value and define type')
parser.add_argument('-c', action='store_const', dest='constant_value', const='value-to-store',
help='Store a constant value')
parser.add_argument('-a', action='append', dest='collection', default=[], help='Add repeated values to a list')
parser.add_argument('-A', action='append_const', dest='const_collection', const='value-1-to-append', default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection', const='value-2-to-append',
help='Add different values to list')
args = parser.parse_args()
logging.debug('NoPre = %r' % args.NoPre)
logging.debug('simple_value = %r' % args.simple_value)
logging.debug('constant_value = %r' % args.constant_value)
logging.debug('boolean_switch = %r' % args.boolean_switch)
logging.debug('collection = %r' % args.collection)
logging.debug('const_collection = %r' % args.const_collection)
return args
if __name__ == '__main__':
from MyLog import init_logger
logger = init_logger()
parse_command_line()
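# Example invocations (illustrative; the DEBUG lines only show up if MyLog's
# init_logger() configures logging at DEBUG level):
#
#   python MyArgparse.py positional -t -s simple -a one -a two -A -B
#   python MyArgparse.py positional -f -c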
|
from django.contrib import admin
from trainer.models import Language, Word, Card, Set
admin.site.register(Language)
admin.site.register(Word)
admin.site.register(Card)
admin.site.register(Set)
|
# Wait for the end-point registration or to quit
event_first_of(
event_healthy,
event_stop,
).wait()
while True:
data_or_stop.wait()
if event_stop.is_set():
return
# The queue is not empty at this point, so this won't raise Empty.
# This task being the only consumer is a requirement.
data = queue.peek(block=False)
backoff = timeout_exponential_backoff(
message_retries,
message_retry_timeout,
message_retry_max_timeout,
)
acknowledged = retry_with_recovery(
protocol,
data,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
backoff,
)
if acknowledged:
queue.get()
# Checking the length of the queue does not trigger a
# context-switch, so it's safe to assume the length of the queue
# won't change under our feet and when a new item will be added the
# event will be set again.
if not queue:
data_or_stop.clear()
if event_stop.is_set():
return
def healthcheck(
protocol,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
ping_nonce):
""" Sends a periodical Ping to `receiver_address` to check its health. """
# The state of the node is unknown, the events are set to allow the tasks
# to do work.
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNKNOWN,
)
# Always call `clear` before `set`; since only `set` does context-switches,
# it's easier to reason about tasks that are waiting on both events.
# Wait for the end-point registration or for the node to quit
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
event_healthy.clear()
event_unhealthy.set()
backoff = timeout_exponential_backoff(
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
)
sleep = next(backoff)
while not event_stop.wait(sleep):
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
sleep = next(backoff)
else:
break
# Don't wait to send the first Ping and to start sending messages if the
# endpoint is known
sleep = 0
event_unhealthy.clear()
event_healthy.set()
while not event_stop.wait(sleep):
sleep = nat_keepalive_timeout
ping_nonce['nonce'] += 1
data = protocol.get_ping(
ping_nonce['nonce'],
)
# Send Ping a few times before setting the node as unreachable
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
[nat_keepalive_timeout] * nat_keepalive_retries,
)
if event_stop.is_set():
return
if not acknowledged:
# The node is not healthy, clear the event to stop all queue
# tasks
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNREACHABLE,
)
event_healthy.clear()
event_unhealthy.set()
# Retry until recovery, used for:
# - Checking node status.
# - Nat punching.
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
repeat(nat_invitation_timeout),
)
if acknowledged:
event_unhealthy.clear()
event_healthy.set()
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_REACHABLE,
)
class RaidenProtocol(object):
""" Encode the message into a packet and send it.
Each message received is stored by hash and if it is received twice the
previous answer is resent.
Repeat sending messages until an acknowledgment is received or the maximum
number of retries is hit.
"""
def __init__(
self,
transport,
discovery,
raiden,
retry_interval,
retries_before_backoff,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout):
self.transport = transport
self.discovery = discovery
self.raiden = raiden
self.retry_interval = retry_interval
self.retries_before_backoff = retries_before_backoff
self.nat_keepalive_retries = nat_keepalive_retries
self.nat_keepalive_timeout = nat_keepalive_timeout
self.nat_invitation_timeout = nat_invitation_timeout
self.event_stop = Event()
self.channel_queue = dict() # TODO: Change keys to the channel address
self.greenlets = list()
self.addresses_events = dict()
self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)
# Maps the echohash of received and *successfully* processed messages to
# its Ack, used to ignore duplicate messages and resend the Ack.
self.receivedhashes_to_acks = dict()
# Maps the echohash to a SentMessageState
self.senthashes_to_states = dict()
# Maps the addresses to a dict with the latest nonce (using a dict
# because python integers are immutable)
self.nodeaddresses_to_nonces = dict()
cache = cachetools.TTLCache(
maxsize=50,
ttl=CACHE_TTL,
)
cache_wrapper = cachetools.cached(cache=cache)
self.get_host_port = cache_wrapper(discovery.get)
def start(self):
self.transport.start()
def stop_and_wait(self):
# Stop handling incoming packets, but don't close the socket. The
# socket can only be safely closed after all outgoing tasks are stopped
self.transport.stop_accepting()
# Stop processing the outgoing queues
self.event_stop.set()
gevent.wait(self.greenlets)
# All outgoing tasks are stopped. Now it's safe to close the socket. At
# this point there might be some incoming message being processed,
# keeping the socket open is not useful for these.
self.transport.stop()
# Set all the pending results to False
for waitack in self.senthashes_to_states.itervalues():
waitack.async_result.set(False)
def get_health_events(self, receiver_address):
""" Starts a healthcheck taks for `receiver_address` and returns a
HealthEvents with locks to react on its current state.
"""
if receiver_address not in self.addresses_events:
self.start_health_check(receiver_address)
return self.addresses_events[receiver_address]
def start_health_check(self, receiver_address):
""" Starts a task for healthchecking `receiver_address` if there is not
one yet.
"""
if receiver_address not in self.addresses_events:
ping_nonce = self.nodeaddresses_to_nonces.setdefault(
receiver_address,
{'nonce': 0}, # HACK: Allows the task to mutate the object
)
events = HealthEvents(
event_healthy=Event(),
event_unhealthy=Event(),
)
self.addresses_events[receiver_address] = events
self.greenlets.append(gevent.spawn(
healthcheck,
self,
receiver_address,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.nat_keepalive_retries,
self.nat_keepalive_timeout,
self.nat_invitation_timeout,
ping_nonce,
))
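# Hedged sketch of the backoff helper used by the tasks above (the real
# timeout_exponential_backoff is defined elsewhere in raiden; this minimal
# re-implementation only mirrors the behavior the callers rely on: a fixed
# timeout for the first `retries` attempts, then doubling up to the maximum).
def _timeout_exponential_backoff_sketch(retries, timeout, maximum_timeout):
    for _ in range(retries):
        yield timeout
    while timeout < maximum_timeout:
        timeout = min(timeout * 2, maximum_timeout)
        yield timeout
    while True:
        yield maximum_timeout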
|
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
'''default config for webservice'''
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
print('default config loaded')
|
convertPrio(cal.vtodo.priority.value) if hasattr(cal.vtodo, 'priority') else None
}
fakePOST = QueryDict(mutable=True)
fakePOST.update(params)
form = SimpleTickets(fakePOST)
if form.is_valid():
cd = form.cleaned_data
ticket = get_ticket_model()
# change ticket
try:
tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
tic.caption = cd['caption']
tic.description = cd['description']
tic.priority = cd['priority']
# tic.assigned = cd['assigned']
tic.show_start = cd['show_start']
tic.save(user=request.user)
# new ticket
except ticket.DoesNotExist:
tic = ticket()
tic.caption = cd['caption']
tic.description = cd['description']
if 'priority' not in cd or not cd['priority']:
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_PRIORITY') and settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY:
tic.priority_id = settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY
else:
tic.priority = cd['priority']
tic.assigned = request.user
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_CUSTOMER') and settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER:
if settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER == -1:
tic.customer = request.organisation
else:
tic.customer_id = settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_COMPONENT') and settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT:
tic.component_id = settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT
tic.show_start = cd['show_start']
tic.uuid = cal.vtodo.uid.value
tic.save(user=request.user)
if tic.assigned:
touch_ticket(tic.assigned, tic.pk)
for ele in form.changed_data:
form.initial[ele] = ''
remember_changes(request, form, tic)
touch_ticket(request.user, tic.pk)
mail_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_MAIL_RCPT, is_api=True)
jabber_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_JABBER_RCPT, is_api=True)
else:
raise Exception(form.errors)
def remove(self, name):
pass
def replace(self, name, text):
self.append(name, text)
@property
def text(self):
return ical.serialize(self.tag, self.headers, self.items.values())
@classmethod
def children(cls, path):
"""Yield the children of the collection at local ``path``."""
request = cls._getRequestFromUrl(path)
children = list(tickets_reports.objects.filter(active_record=True, c_user=request.user).values_list('slug', flat=True))
children = ['%s/%s.ics' % (request.user.username, itm) for itm in children]
return map(cls, children)
@classmethod
def is_node(cls, path):
"""Return ``True`` if relative ``path`` is a node.
A node is a WebDAV collection whose members are other collections.
"""
request = cls._getRequestFromUrl(path)
if path == request.user.username:
return True
else:
return False
@classmethod
def is_leaf(cls, path):
"""Return ``True`` if relative ``path`` is a leaf.
A leaf is a WebDAV collection whose members are not collections.
"""
result = False
if '.ics' in path:
try:
request = cls._getRequestFromUrl(path)
rep = tickets_reports.objects.get(active_record=True, pk=cls._getReportFromUrl(path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
result = (tic.exists())
except Exception:
import sys
a = sys.exc_info()
return result
@property
def last_modified(self):
try:
request = self._getRequestFromUrl(self.path)
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
date = tic.latest('u_date')
return datetime.strftime(
date.last_action_date, '%a, %d %b %Y %H:%M:%S %z')
except Exception:
import sys
a = sys.exc_info()
@property
def tag(self):
with self.props as props:
if 'tag' not in props:
props['tag'] = 'VCALENDAR'
return props['tag']
@property
@contextmanager
def props(self):
# On enter
properties = {}
try:
props = DBProperties.objects.get(path=self.path)
except DBProperties.DoesNotExist:
pass
else:
properties.update(json.loads(props.text))
old_properties = properties.copy()
yield properties
# On exit
if old_properties != properties:
props, created = DBProperties.objects.get_or_create(path=self.path)
props.text = json.dumps(properties)
props.save()
@property
def items(self):
itms = {}
try:
request = self._getRequestFromUrl(self.path)
if self.path == request.user.username:
return itms
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
for item in tic:
text = self._itemToICal(item)
itms.update(self._parse(text, ICAL_TYPES))
except Exception:
import sys
a = sys.exc_info()
return itms
@classmethod
def _getRequestFromUrl(cls, path):
user = path.split('/')[0]
request = FakeRequest()
request.user = User.objects.get(username=user)
request.organisation = UserProfile.objects.get(user=request.user).organisation
return request
@classmethod
def _getReportFromUrl(cls, path):
if '.ics' in path:
file = path.split('/')[-1]
file = file.replace('.ics', '')
repid = tickets_reports.objects.get(active_record=True, slug=file).pk
return repid
return 0
@classmethod
def _itemToICal(cls, item):
cal = vobject.iCalendar()
cal.add('vtodo')
cal.vtodo.add('summary').value = item.caption
cal.vtodo.add('uid').value = str(item.uuid)
cal.vtodo.add('created').value = item.c_date
if item.closed:
cal.vtodo.add('status').value = 'COMPLETED'
if item.priority:
cal.vtodo.add('priority').value = str(item.priority.caldav)
else:
cal.vtodo.add('priority').value = '0'
if item.description:
cal.vtodo.add('description').value = item.description
if item.show_start:
# cal.vtodo.add('dstart').value = item.show_start
cal.vtodo.add('due').value = item.show_start
cal.vtodo.add('vala
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import bool_property, unicode_list_property, unicode_property, typed_property
class BankDataTO(object):
bankCode = unicode_property('bankCode')
name = unicode_property('name')
bic = unicode_property('bic')
class OpenIbanResultTO(object):
valid = bool_property('valid')
messages = unicode_list_property('message')
iban = unicode_property('iban')
bankData = typed_property('bankData', BankDataTO) # type: BankDataTO
checkResults = typed_property('checkResults', dict)
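# Illustrative shape of the payload these TOs are meant to map (an
# assumption: they appear to mirror an openiban-style JSON response):
#
#   {"valid": true, "messages": [], "iban": "BE68539007547034",
#    "bankData": {"bankCode": "539", "name": "...", "bic": "..."},
#    "checkResults": {"bankCode": true}}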
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .module import CmsoModule
__all__ = ['CmsoModule']
|
if self.status == self.INACTIVE:
# If these fields have already been changed, don't
# override those changes. Don't unset the name field
# if no further data is available.
if self.name == self.feed_url:
self.name = video_iter.title or self.name
if not self.webpage:
self.webpage = video_iter.webpage or ''
if not self.description:
self.description = video_iter.description or ''
self.save()
super(Feed, self).update(video_iter, source_import=feed_import,
**kwargs)
def source_type(self):
return self.calculated_source_type
def _calculate_source_type(self):
video_service = self.video_service()
if video_service is None:
return u'Feed'
else:
return u'User: %s' % video_service
def video_service(self):
for service, regexp in VIDEO_SERVICE_REGEXES:
if re.search(regexp, self.feed_url, re.I):
return service
def pre_save_set_calculated_source_type(instance, **kwargs):
# Always save the calculated_source_type
instance.calculated_source_type = instance._calculate_source_type()
# Plus, if the name changed, we have to recalculate all the Videos that depend on us.
try:
v = Feed.objects.get(id=instance.id)
except Feed.DoesNotExist:
return instance
if v.name != instance.name:
# recalculate all the sad little videos' calculated_source_type
for vid in instance.video_set.all():
vid.save()
models.signals.pre_save.connect(pre_save_set_calculated_source_type,
sender=Feed)
class Category(MPTTModel):
"""
A category for videos to be contained in.
Categories and tags aren't too different functionally, but categories are
more strict as they can't be defined by visitors. Categories can also be
hierarchical.
Fields:
- site: A link to the django.contrib.sites.models.Site object this object
is bound to
- name: Name of this category
- slug: a slugified version of the name, used to create more friendly URLs
- logo: An image to associate with this category
- description: human readable description of this item
- parent: Reference to another Category. Allows you to have hierarchical
categories.
"""
site = models.ForeignKey(Site)
name = models.CharField(
max_length=80, verbose_name='Category Name',
help_text=_("The name is used to identify the category almost "
"everywhere; for example, under a video or in a "
"category widget."))
slug = models.SlugField(
verbose_name='Category Slug',
help_text=_("The \"slug\" is the URL-friendly version of the name. It "
"is usually lower-case and contains only letters, numbers "
"and hyphens."))
logo = models.ImageField(
upload_to=utils.UploadTo('localtv/category/logo/%Y/%m/%d/'),
blank=True,
verbose_name='Thumbnail/Logo',
help_text=_("Optional. For example: a leaf for 'environment' or the "
"logo of a university department."))
description = models.TextField(
blank=True, verbose_name='Description (HTML)',
help_text=_("Optional. The description is not prominent by default, but"
" some themes may | show it."))
parent = models.ForeignKey(
'self', blank=True, null=True,
related_name='child_set',
verbose_name='Category Parent',
help_text=_("Categories, unlike tags, can have a hierarchy."))
class MPTTMeta:
order_insertion_by = ['name']
class Meta:
unique_together = (
('slug', 'site'),
('name', 'site'))
def __unicode__(self):
return self.name
def dashes(self):
"""
Returns a string of em dashes equal to the :class:`Category`\ 's
level. This is used to indent the category name in the admin
templates.
"""
return mark_safe('—' * self.level)
@models.permalink
def get_absolute_url(self):
return ('localtv_category', [self.slug])
def approved_set(self):
"""
Returns active videos for the category and its subcategories, ordered
by decreasing best date.
"""
opts = self._mptt_meta
lookups = {
'status': Video.ACTIVE,
'categories__left__gte': getattr(self, opts.left_attr),
'categories__left__lte': getattr(self, opts.right_attr),
'categories__tree_id': getattr(self, opts.tree_id_attr)
}
lookups = self._tree_manager._translate_lookups(**lookups)
return Video.objects.filter(**lookups).distinct()
approved_set = property(approved_set)
def unique_error_message(self, model_class, unique_check):
return 'Category with this %s already exists.' % (
unique_check[0],)
class SavedSearch(Source):
"""
A set of keywords to regularly pull in new videos from.
There's an administrative interface for doing "live searches".
Fields:
- site: site this savedsearch applies to
- query_string: a whitespace-separated list of words to search for. Words
starting with a dash will be processed as negative query terms
- when_created: date and time that this search was saved.
"""
query_string = models.TextField()
when_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.query_string
def update(self, **kwargs):
"""
Fetch and import new videos from this search.
"""
try:
SearchImport.objects.get(source=self,
status=SearchImport.STARTED)
except SearchImport.DoesNotExist:
pass
else:
logging.info('Skipping import of %s: already in progress' % self)
return
search_import = SearchImport.objects.create(
source=self,
auto_approve=self.auto_approve
)
searches = vidscraper.auto_search(
self.query_string,
max_results=100,
api_keys=lsettings.API_KEYS,
)
video_iters = []
for video_iter in searches:
try:
video_iter.load()
except Exception:
search_import.handle_error(u'Skipping import of search results '
u'from %s' % video_iter.__class__.__name__,
with_exception=True)
continue
video_iters.append(video_iter)
if video_iters:
super(SavedSearch, self).update(itertools.chain(*video_iters),
source_import=search_import,
**kwargs)
else:
# Mark the import as failed if none of the searches could load.
search_import.fail("All searches failed for {source}",
with_exception=False)
def source_type(self):
return u'Search'
class SourceImportIndex(models.Model):
video = models.OneToOneField('Video', unique=True)
index = models.PositiveIntegerField(blank=True, null=True)
class Meta:
abstract = True
class FeedImportIndex(SourceImportIndex):
source_import = models.ForeignKey('FeedImport', related_name='indexes')
class SearchImportIndex(SourceImportIndex):
source_import = models.ForeignKey('SearchImport', related_name='indexes')
class SourceImportError(models.Model):
message = models.TextField()
traceback = models.TextField(blank=True)
is_skip = models.BooleanField(help_text="Whether this error represents a "
"video that was skipped.")
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
class FeedImportError(SourceImportError):
|
from rest_framework import exceptions as drf_exceptions
from rest_framework import versioning as drf_versioning
from rest_framework.compat import unicode_http_header
from rest_framework.utils.mediatypes import _MediaType
from api.base import exceptions
from api.base import utils
from api.base.renderers import BrowsableAPIRendererNoForms
from api.base.settings import LATEST_VERSIONS
def get_major_version(version):
return int(version.split('.')[0])
def url_path_version_to_decimal(url_path_version):
# 'v2' --> '2.0'
return str(float(url_path_version.split('v')[1]))
def decimal_version_to_url_path(decimal_version):
# '2.0' --> 'v2'
return 'v{}'.format(get_major_version(decimal_version))
def get_latest_sub_version(major_version):
# '2' --> '2.6'
return LATEST_VERSIONS.get(major_version, None)
class BaseVersioning(drf_versioning.BaseVersioning):
def __init__(self):
super(BaseVersioning, self).__init__()
def get_url_path_version(self, kwargs):
invalid_version_message = 'Invalid version in URL path.'
version = kwargs.get(self.version_param)
if version is None:
return self.default_version
version = url_path_version_to_decimal(version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotFound(invalid_version_message)
if get_major_version(version) == get_major_version(self.default_version):
return self.default_version
return version
def get_header_version(self, request, major_version):
invalid_version_message = 'Invalid version in "Accept" header.'
media_type = _MediaType(request.accepted_media_type)
version = media_type.params.get(self.version_param)
if not version:
return None
if version == 'latest':
return get_latest_sub_version(major_version)
version = unicode_http_header(version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotAcceptable(invalid_version_message)
return version
def get_default_version(self, request, major_version):
"""Returns the latest available version for the browsable api, otherwise REST_FRAMEWORK default version"""
if request.accepted_renderer.__class__ == BrowsableAPIRendererNoForms:
return get_latest_sub_version(major_version)
return self.default_version
def get_query_param_version(self, request, major_version):
invalid_version_message = 'Invalid version in query parameter.'
version = request.query_params.get(self.version_param)
if not version:
return None
if version == 'latest':
return get_latest_sub_version(major_version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotFound(invalid_version_message)
return version
def validate_pinned_versions(self, url_path_version, header_version, query_parameter_version):
url_path_major_version = get_major_version(url_path_version)
header_major_version = get_major_version(header_version) if header_version else None
query_major_version = get_major_version(query_parameter_version) if query_parameter_version else None
if header_version and header_major_version != url_path_major_version:
raise exceptions.Conflict(
detail='Version {} specified in "Accept" header does not fall within URL path version {}'.format(
header_version,
url_path_version,
),
)
if query_parameter_version and query_major_version != url_path_major_version:
raise exceptions.Conflict(
detail='Version {} specified in query parameter does not fall within URL path version {}'.format(
query_parameter_version,
url_path_version,
),
)
if header_version and query_parameter_version and (header_version != query_parameter_version):
raise exceptions.Conflict(
detail='Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
header_version,
query_parameter_version,
),
)
def determine_version(self, request, *args, **kwargs):
url_path_version = self.get_url_path_version(kwargs)
major_version = get_major_version(url_path_version)
header_version = self.get_header_version(request, major_version)
query_parameter_version = self.get_query_param_version(request, major_version)
version = url_path_version
if header_version or query_parameter_version:
self.validate_pinned_versions(url_path_version, header_version, query_parameter_version)
version = header_version if header_version else query_parameter_version
else:
version = self.get_default_version(request, major_version)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
url_path_version = self.get_url_path_version(kwargs)
major_version = get_major_version(url_path_version)
query_parameter_version = self.get_query_param_version(request, major_version)
kwargs = {} if (kwargs is None) else kwargs
kwargs[self.version_param] = decimal_version_to_url_path(url_path_version)
query_kwargs = {'version': query_parameter_version} if query_parameter_version else None
return utils.absolute_reverse(
viewname, query_kwargs=query_kwargs, args=args, kwargs=kwargs,
)
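# Doctest-style illustration of the pure helpers above (an added sketch):
#
#   >>> get_major_version('2.6')
#   2
#   >>> url_path_version_to_decimal('v2')
#   '2.0'
#   >>> decimal_version_to_url_path('2.6')
#   'v2'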
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class FilerioCom(XFSAccount):
__name__ = "FilerioCom"
__type__ = "ac | count"
__version__ = "0.07"
__status__ = "testing"
__description__ = """FileRio.in account plugin"""
__license__ = "GP | Lv3"
__authors__ = [("zoidberg", "[email protected]")]
PLUGIN_DOMAIN = "filerio.in"
|
import pytest
from ray.train.callbacks.results_preprocessors import (
ExcludedKeysResultsPreprocessor,
IndexedResultsPreprocessor,
SequentialResultsPreprocessor,
AverageResultsPreprocessor,
MaxResultsPreprocessor,
WeightedAverageResultsPreprocessor,
)
def test_excluded_keys_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
expected = [{"b": 2}, {"b": 4}]
preprocessor = ExcludedKeysResultsPreprocessor("a")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
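# For reference, a hedged stand-in showing the behavior this test exercises
# (the real ExcludedKeysResultsPreprocessor lives in ray.train; this sketch
# only mirrors what the assertions above rely on):
class _ExcludedKeysSketch:
    def __init__(self, excluded_keys):
        # a single key (as used above) or any iterable of keys
        self._excluded = (
            {excluded_keys} if isinstance(excluded_keys, str) else set(excluded_keys)
        )

    def preprocess(self, results):
        # drop excluded keys from every result dict, preserving order
        return [
            {k: v for k, v in result.items() if k not in self._excluded}
            for result in results
        ]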
def test_indexed_results_preprocessor():
results = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
expected = [{"a": 1}, {"a": 3}]
preprocessor = IndexedResultsPreprocessor([0, 2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_sequential_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = [{"b": 2}, {"b": 6}]
preprocessor_1 = ExcludedKeysResultsPreprocessor("a")
# [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
preprocessor_2 = IndexedResultsPreprocessor([0, 2])
preprocessor = SequentialResultsPreprocessor([preprocessor_1, preprocessor_2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"avg(a)": np.mean([result["a"] for result in results]),
"avg(b)": np.mean([result["b"] for result in results]),
}
)
preprocessor = AverageResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_max_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"max(a)": np.max([result["a"] for result in results]),
"max(b)": np.max([result["b"] for result in results]),
}
)
preprocessor = MaxResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_weighted_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
total_weight = np.sum([result["b"] for result in results])
for res in expected:
res.update(
{
"weight_avg_b(a)": np.sum(
[result["a"] * result["b"] / total_weight for result in results]
)
}
)
preprocessor = WeightedAverageResultsPreprocessor(["a"], "b")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
@pytest.mark.parametrize(
("results_preprocessor", "expected_value"),
[(AverageResultsPreprocessor, 2.0), (MaxResultsPreprocessor, 3.0)],
)
def test_warning_in_aggregate_results_preprocessors(
caplog, results_preprocessor, expected_value
):
import logging
from copy import deepcopy
from ray.util import debug
caplog.at_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"a": 1}, {"a": "invalid"}, {"a": 3}, {"a": "invalid"}]
results3 = [{"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}]
results4 = [{"a": 1}, {"a": 2}, {"a": 3}, {"c": 4}]
# test case 1: metric key `b` is missing from all workers
results_preprocessor1 = results_preprocessor(["b"])
results_preprocessor1.preprocess(results1)
assert "`b` is not reported from workers, so it is ignored." in caplog.text
# test case 2: some values of key `a` have invalid data type
results_preprocessor2 = results_preprocessor(["a"])
expected2 = deepcopy(results2)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected2:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results2) == expected2
# test case 3: all key `a` values are invalid
results_preprocessor2.preprocess(results3)
assert "`a` value type is not valid, so it is ignored." in caplog.text
# test case 4: some workers don't report key `a`
expected4 = deepcopy(results4)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected4:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results4) == expected4
for record in caplog.records:
assert record.levelname == "WARNING"
debug.reset_log_once("b")
debug.reset_log_once("a")
def test_warning_in_weighted_average_results_preprocessors(caplog):
import logging
from copy import deepcopy
caplog.at_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
results3 = [
{"a": 1, "c": 3},
{"a": 2, "c": "invalid"},
{"a": "invalid", "c": 1},
{"a": 4, "c": "invalid"},
]
results4 = [
{"a": 1, "c": "invalid"},
{"a": 2, "c": "invalid"},
{"a": 3, "c": "invalid"},
{"a": 4, "c": "invalid"},
]
# test case 1: weight key `b` is not reported from all workers
results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
expected1 = deepcopy(results1)
for res in expected1:
res.update({"weight_avg_b(a)": 2.5})
assert results_preprocessor1.preprocess(results1) == expected1
assert (
"Averaging weight `b` is not reported by all workers in `train.report()`."
in caplog.text
)
assert "Use equal weight instead." in caplog.text
# test case 2: metric key `a` (to be averaged) is not reported from all workers
results_preprocessor1.preprocess(results2)
assert "`a` is not reported from workers, so it is ignored." in caplog.text
# test case 3: both metric and weight keys have invalid data type
results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
expected3 = deepcopy(results3)
for res in expected3:
res.update({"weight_avg_c(a)": 1.0})
assert results_preprocessor2.preprocess(results3) == expected3
# test case 4: all weight values are invalid
expected4 = deepcopy(results4)
for res in expected4:
res.update({"weight_avg_c(a)": 2.5})
assert results_preprocessor2.preprocess(results4) == expected4
assert "Averaging weight `c` value type is not valid." in caplog.text
for record in caplog.records:
assert record.levelname == "WARNING"
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
import re
from django.core.exceptions import ImproperlyConfigured
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from tranquil.models import Importer
__all__ = ( 'engine', 'meta', 'Session', )
class EngineCache(object):
__shared_state = dict(
engine = None,
meta = None,
Session = None,
)
_mappings = {
'sqlite3': 'sqlite',
'mysql': 'mysql',
'postgresql': 'postgresql',
'postgresql_psycopg2': 'postgresql+psycopg2',
'oracle': 'oracle',
}
def __init__(self):
from django.conf import settings
self.__dict__ = self.__shared_state
if self.engine is not None:
return
if settings.DATABASE_ENGINE == 'django_sqlalchemy.backend':
from django_sqlalchemy import backend
self.engine = backend.engine
else:
options = {
'protocol': self._mappings.get( settings.DATABASE_ENGINE ),
'name': settings.DATABASE_NAME,
'user': settings.DATABASE_USER,
'pass': settings.DATABASE_PASSWORD,
'host': settings.DATABASE_HOST,
'port': settings.DATABASE_PORT,
}
if options['protocol'] is None:
raise ImproperlyConfigured( 'Unknown database engine: %s' % settings.DATABASE_ENGINE )
url = '{protocol}://{user}:{pass}@{host}{port}/{name}'
for p in options:
if p == 'port' and len( options[p] ) > 0:
url = re.sub( '{%s}' % p, ':%s' % options[p], url )
else:
url = re.sub( '{%s}' % p, options[p], url )
self.engine = create_engine( url )
self.meta = MetaData(bind=self.engine,reflect=True)
self.Session = sessionmaker( bind=self.engine, autoflush=True, autocommit=False )
self.importer = Importer(self.meta)
cache = EngineCache()
engine = cache.engine
meta = cache.meta
Session = cache.Session
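# Note on the Borg pattern above: every EngineCache instance shares
# __shared_state as its __dict__, so the engine, metadata and Session are
# built exactly once. Illustrative check (assumes Django settings are
# configured):
#
#   a, b = EngineCache(), EngineCache()
#   assert a.engine is b.engine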
|
"""
Tests outgoing calls created with InitialAudio and/or InitialVideo, and
exposing the initial contents of incoming calls as values of InitialAudio and
InitialVideo
"""
import operator
from servicetest import (
assertContains, assertEquals, assertLength,
wrap_channel, EventPattern, call_async, make_channel_proxy)
from jingletest2 import JingleTest2, test_all_dialects
import constants as cs
def outgoing(jp, q, bus, conn, stream):
remote_jid = '[email protected]/beyond'
jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)
jt.prepare()
self_handle = conn.GetSelfHandle()
remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]
rccs = conn.Properties.Get(cs.CONN_IFACE_REQUESTS, 'RequestableChannelClasses')
media_classes = [ rcc for rcc in rccs
if rcc[0][cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAMED_MEDIA ]
assertLength(1, media_classes)
fixed, allowed = media_classes[0]
assertContains(cs.INITIAL_AUDIO, allowed)
assertContains(cs.INITIAL_VIDEO, allowed)
check_neither(q, conn, bus, stream, remote_handle)
check_iav(jt, q, conn, bus, stream, remote_handle, True, False)
check_iav(jt, q, conn, bus, stream, remote_handle, False, True)
check_iav(jt, q, conn, bus, stream, remote_handle, True, True)
def check_neither(q, conn, bus, stream, remote_handle):
"""
Make a channel without specifying InitialAudio or InitialVideo; check
that it's announced with both False, and that they're both present and
false in GetAll().
"""
path, props = conn.Requests.CreateChannel({
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_HANDLE: remote_handle})
assertContains((cs.INITIAL_AUDIO, False), props.items())
assertContains((cs.INITIAL_VIDEO, False), props.items())
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])
props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
assertContains(('InitialAudio', False), props.items())
assertContains(('InitialVideo', False), props.items())
# We shouldn't have started a session yet, so there shouldn't be any
# session handlers. Strictly speaking, there could be a session handler
# with no stream handlers, but...
session_handlers = chan.MediaSignalling.GetSessionHandlers()
assertLength(0, session_handlers)
def check_iav(jt, q, conn, bus, stream, remote_handle, initial_audio,
initial_video):
"""
Make a channel and check that its InitialAudio and InitialVideo properties
come out correctly.
"""
call_async(q, conn.Requests, 'CreateChannel', {
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_HANDLE: remote_handle,
cs.INITIAL_AUDIO: initial_audio,
cs.INITIAL_VIDEO: initial_video,
})
if initial_video and (not jt.jp.can_do_video()
or (not initial_audio and not jt.jp.can_do_video_only ())):
# Some protocols can't do video
event = q.expect('dbus-error', method='CreateChannel')
assertEquals(cs.NOT_CAPABLE, event.error.get_dbus_name())
else:
path, props = q.expect('dbus-return', method='CreateChannel').value
assertContains((cs.INITIAL_AUDIO, initial_audio), props.items())
assertContains((cs.INITIAL_VIDEO, initial_video), props.items())
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])
props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
assertContains(('InitialAudio', initial_audio), props.items())
assertContains(('InitialVideo', initial_video), props.items())
session_handlers = chan.MediaSignalling.GetSessionHandlers()
assertLength(1, session_handlers)
path, type = session_handlers[0]
assertEquals('rtp', type)
session_handler = make_channel_proxy(conn, path, 'Media.SessionHandler')
session_handler.Ready()
stream_handler_paths = []
stream_handler_types = []
for x in [initial_audio, initial_video]:
if x:
e = q.expect('dbus-signal', signal='NewStreamHandler')
stream_handler_paths.append(e.args[0])
stream_handler_types.append(e.args[2])
if initial_audio:
assertContains(cs.MEDIA_STREAM_TYPE_AUDIO, stream_handler_types)
if initial_video:
assertContains(cs.MEDIA_STREAM_TYPE_VIDEO, stream_handler_types)
for x in xrange (0, len(stream_handler_paths)):
p = stream_handler_paths[x]
t = stream_handler_types[x]
sh = make_channel_proxy(conn, p, 'Media.StreamHandler')
sh.NewNativeCandidate("fake", jt.get_remote_transports_dbus())
if t == cs.MEDIA_STREAM_TYPE_AUDIO:
sh.Ready(jt.get_audio_codecs_dbus())
else:
sh.Ready(jt.get_video_codecs_dbus())
sh.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED)
e = q.expect('stream-iq',
predicate=jt.jp.action_predicate('session-initiate'))
jt.parse_session_initiate (e.query)
jt.accept()
events = reduce(operator.concat,
[ [ EventPattern('dbus-signal', signal='SetRemoteCodecs', path=p),
EventPattern('dbus-signal', signal='SetStreamPlaying', path=p),
] for p in stream_handler_paths
], [])
q.expect_many(*events)
chan.Close()
def incoming(jp, q, bus, conn, stream):
remote_jid = 'skinny.fists@heaven/antennas'
jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)
jt.prepare()
self_handle = conn.GetSelfHandle()
remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]
for a, v in [("audio1", None), (None, "video1"), ("audio1", "video1")]:
if v != None and not jp.can_do_video():
continue
if a == None and v != None and not jp.can_do_video_only():
continue
jt.incoming_call(audio=a, video=v)
e = q.expect('dbus-signal', signal='NewChannels',
predicate=lambda e:
cs.CHANNEL_TYPE_CONTACT_LIST not in e.args[0][0][1].values())
chans = e.args[0]
assertLength(1, chans)
path, props = chans[0]
assertEquals(cs.CHANNEL_TYPE_STREAMED_MEDIA, props[cs.CHANNEL_TYPE])
assertEquals(a != None, props[cs.INITIAL_AUDIO])
assertEquals(v != None, props[cs.INITIAL_VIDEO])
# FIXME: This doesn't check non-Google contacts that can only do one
# media type, as contacts simulated by JingleTest2 can always do both.
assertEquals(not jp.can_do_video() or not jp.can_do_video_only(),
props[cs.IMMUTABLE_STREAMS])
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA)
chan.Close()
if __name__ == '__main__':
test_all_dialects(outgoing)
test_all_dialects(incoming)
|
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
compression_type=CompressionTypes.BZIP2)
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_bzip2_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with bz2.BZ2File(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
compression_type=CompressionTypes.BZIP2)
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_bzip2_concat(self):
with TempDir() as tempdir:
bzip2_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with bz2.BZ2File(bzip2_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
bzip2_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with bz2.BZ2File(bzip2_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
bzip2_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with bz2.BZ2File(bzip2_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
final_bzip2_file = tempdir.create_temp_file()
with open(bzip2_file_name1, 'rb') as src, open(
final_bzip2_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(bzip2_file_name2, 'rb') as src, open(
final_bzip2_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(bzip2_file_name3, 'rb') as src, open(
final_bzip2_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_bzip2_file,
compression_type=beam.io.filesystem.CompressionTypes.BZIP2)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
pipeline.run()
def test_read_deflate(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with open(file_name, 'wb') as f:
f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.DEFLATE,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_deflate_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with open(file_name, 'wb') as f:
f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.DEFLATE,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_deflate_concat(self):
with TempDir() as tempdir:
deflate_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with open(deflate_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with open(deflate_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with open(deflate_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
final_deflate_file = tempdir.create_temp_file()
with open(deflate_file_name1, 'rb') as src, \
open(final_deflate_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name2, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name3, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_deflate_file,
compression_type=beam.io.filesystem.CompressionTypes.DEFLATE)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
pipeline.run()
def test_read_gzip(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_gzip_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_gzip_concat(self):
with TempDir() as tempdir:
gzip_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with gzip.open(gzip_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with gzip.open(gzip_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with gzip.open(gzip_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
final_gzip_file = tempdir.create_temp_file()
with open(gzip_file_name1, 'rb') as src, \
open(final_gzip_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name2, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name3, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_gzip_file,
compression_type=beam.io.filesystem.CompressionTypes.GZIP)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
pipeline.run()
def test_read_all_gzip(self):
_, lines = write_data(100)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = (pipeline
| Create([file_name])
| 'ReadAll' >> ReadAllFromText(
compression_type=CompressionTypes.GZIP))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_gzip_large(self):
_, lines = write_data(10000)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
from django.dispatch import receiver
from pretix.base.signals import register_payment_providers
|
@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
from .payment import Paypal
return Paypal
|
from django.db import models
class AdjacencyListModel(models.Model):
title = models.CharField(max_length=100)
parent = models.ForeignKey(
'self', related_name='%(class)s_parent', on_delete=models.CASCADE, db_index=True, null=True, blank=True)
def __str__(self):
return 'adjacencylistmodel_%s' % self.title
class NestedSetModel(models.Model):
title = models.CharField(max_length=100)
lft = models.IntegerField(db_index=True)
rgt = models.IntegerField(db_index=True)
level = models.IntegerField(db_index=True)
def __str__(self):
return 'nestedsetmodel_%s' % self.title
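# For context, a minimal sketch of why NestedSetModel carries lft/rgt/level
# while AdjacencyListModel only needs a parent pointer: in the nested set
# technique a node's whole subtree is selected by interval containment alone,
# with no recursive queries ("node" below is illustrative):
#
#   descendants = NestedSetModel.objects.filter(lft__gt=node.lft, rgt__lt=node.rgt)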
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-04-10 03:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('netdevice', '0006_auto_20190409_0325'),
]
operations = [
migrations.RenameField(
model_name='vrf',
old_name='vrf_name',
new_name='name',
),
migrations.RenameField(
model_name='vrf',
old_name='vrf_target',
new_name='target',
),
]
|
from functools import reduce
class ScopedString (object):
def __init__ (self):
self._stack = []
def push (self, frame):
self._stack.append (frame)
def pop (self):
frame = self._stack.pop()
return frame
def __str__ (self):
return '.'.join (self._stack)
class ScopedList (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push()
def push (self):
self._stack.append ([])
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
self._stack.pop()
def append (self, val):
self._stack[-1].append (val)
def _normalize (self):
return reduce (lambda x, y: x + y, self._stack, [])
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
class ScopedDict (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push ()
def push (self):
self._stack.insert (0, {})
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
temp = self._stack[0]
del (self._stack[0])
return temp
def _normalize (self):
normal = {}
for frame in self._stack:
for key, value in frame.items():
if key not in normal:
normal[key] = value
return normal
def __getitem__ (self, key):
for frame in self._stack:
if key in frame:
return frame[key]
raise KeyError (key)
def __setitem__ (self, key, value):
self._stack[0][key] = value
def __contains__ (self, key):
for frame in self._stack:
if key in frame:
return True
return False
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
def items (self):
return self._normalize().items()
def keys (self):
return self._normalize().keys()
def values (self):
return self._normalize().values()
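# A minimal usage sketch (illustrative, not part of the original module):
# an inner frame shadows outer frames until popped, after which the outer
# binding becomes visible again.
#
#   scope = ScopedDict()
#   scope['x'] = 1
#   scope.push()
#   scope['x'] = 2
#   assert scope['x'] == 2
#   scope.pop()
#   assert scope['x'] == 1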
|
"""A likelihood function representing a Student-t distribution.
Author:
Ilias Bilionis
Date:
1/21/2013
"""
__all__ = ['StudentTLikelihoodFunction']
import numpy as np
import scipy
import math
from . import GaussianLikelihoodFunction
class StudentTLikelihoodFunction(GaussianLikelihoodFunction):
"""An object representing a Student-t likelihood function."""
# The degrees of freedom
_nu = None
@property
def nu(self):
"""Get the degrees of freedom."""
return self._nu
@nu.setter
def nu(self, value):
"""Set the degrees of freedom."""
if not isinstance(value, float):
raise TypeError('nu must be a float.')
self._nu = value
def __init__(self, nu, num_input=None, data=None, mean_function=None, cov=None,
name='Student-t Likelihood Function'):
"""Initialize the object.
Arguments:
nu --- The degrees of freedom of the distribution.
Keyword Arguments
num_input --- The number of inputs. Optional, if
mean_function is a proper Function.
data --- The observed data. A vector. Optional,
if mean_function is a proper Function.
It can be set later.
mean_function --- The mean function. See the super class
for the description.
cov --- The covariance matrix. It can either be
a positive definite matrix, or a number.
The data or a proper mean_function is
assumed to be present.
name --- A name for the likelihood function.
"""
self.nu = nu
super(StudentTLikelihoodFunction, self).__init__(num_input=num_input,
data=data,
mean_function=mean_function,
cov=cov,
name=name)
def __call__(self, x):
"""Evaluate the function at x."""
mu = self.mean_function(x)
y = scipy.linalg.solve_triangular(self.L_cov, self.data - mu)
return (
- 0.5 * (self.nu + self.num_data) * math.log(1. + np.dot(y, y) / self.nu))
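# For reference, what __call__ computes is the multivariate Student-t
# log-density kernel, up to an additive constant independent of x (a sketch;
# L_cov is assumed, per the super class, to be a Cholesky factor of cov and
# num_data the length of the data vector):
#
#   log p(d | x) = -(nu + N) / 2 * log(1 + y^T y / nu),  y = L_cov^{-1} (d - mu(x))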
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_index_crud_operations_async.py
DESCRIPTION:
This sample demonstrates how to get, create, update, or delete an index.
USAGE:
python sample_index_crud_operations_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_SEARCH_SERVICE_ENDPOINT - the endpoint of your Azure Cognitive Search service
2) AZURE_SEARCH_API_KEY - your search API key
"""
import os
import asyncio
service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT")
key = os.getenv("AZURE_SEARCH_API_KEY")
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexClient
from azure.search.documents.indexes.models import (
ComplexField,
CorsOptions,
SearchIndex,
ScoringProfile,
SearchFieldDataType,
SimpleField,
SearchableField
)
client = SearchIndexClient(service_endpoint, AzureKeyCredential(key))
async def create_index():
# [START create_index_async]
name = "hotels"
fields = [
SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
SimpleField(name="baseRate", type=SearchFieldDataType.Double),
SearchableField(name="description", type=SearchFieldDataType.String, collection=True),
ComplexField(name="address", fields=[
SimpleField(name="streetAddress", type=SearchFieldDataType.String),
SimpleField(name="city", type=SearchFieldDataType.String),
], collection=True)
]
cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
scoring_profiles = []
index = SearchIndex(
name=name,
fields=fields,
scoring_profiles=scoring_profiles,
cors_options=cors_options)
result = await client.create_index(index)
# [END create_index_async]
async def get_index():
# [START get_index_async]
name = "hotels"
result = await client.get_index(name)
# [END get_index_async]
async def update_index():
# [START update_index_async]
name = "hotels"
fields = [
SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
SimpleField(name="baseRate", type=SearchFieldDataType.Double),
SearchableField(name="description", type=SearchFieldDataType.String, collection=True),
SearchableField(name="hotelName", type=SearchFieldDataType.String),
ComplexField(name="address", fields=[
SimpleField(name="streetAddress", type=SearchFieldDataType.String),
SimpleField(name="city", type=SearchFieldDataType.String),
SimpleField(name="state", type=SearchFieldDataType.String),
], collection=True)
]
cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
scoring_profile = ScoringProfile(
name="MyProfile"
)
scoring_profiles = []
scoring_profiles.append(scoring_profile)
index = SearchIndex(
name=name,
fields=fields,
scoring_profiles=scoring_profiles,
cors_options=cors_options)
result = await client.create_or_update_index(index=index)
# [END update_index_async]
async def delete_index():
# [START delete_index_async]
name = "hotels"
await client.delete_index(name)
# [END delete_index_async]
async def main():
await create_index()
await get_index()
await update_index()
await delete_index()
await client.close()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
|
def RGB01ToHex(rgb):
"""
Return an RGB color value as a hex color string.
"""
return '#%02x%02x%02x' % tuple([int(x * 255) for x in rgb])
def hexToRGB01(hexColor):
"""
Return a hex color string as an RGB tuple of floats in the range 0..1
"""
h = hexColor.lstrip('#')
return tuple([x / 255.0 for x in [int(h[i:i + 2], 16) for i in (0, 2, 4)]])
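# A quick illustrative round trip (note the 8-bit quantisation: 0.5 maps to
# 0x7f, which maps back to 127/255 rather than exactly 0.5):
#
#   RGB01ToHex((1.0, 0.5, 0.0))   # -> '#ff7f00'
#   hexToRGB01('#ff7f00')         # -> (1.0, 0.4980..., 0.0)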
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
class ECSServiceCheck(Script):
def service_check(self, env):
import params
env.set_params(params)
# run fs list command to make sure ECS client can talk to ECS backend
list_command = format("fs -ls /")
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
user=params.hdfs_user
)
ExecuteHadoop(list_command,
user=params.hdfs_user,
logoutput=True,
conf_dir=params.hadoop_conf_dir,
try_sleep=3,
tries=20,
bin_dir=params.hadoop_bin_dir
)
if __name__ == "__main__":
ECSServiceCheck().execute()
|
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dealer.git import git
from django.template import RequestContext
requestcontext = None
class MakoMiddleware(object):
def process_request(self, request):
global requestcontext
requestcontext = RequestContext(request)
requestcontext['is_secure'] = request.is_secure()
requestcontext['site'] = request.get_host()
requestcontext['REVISION'] = git.revision
|
"""
Module with functionality for splitting and shuffling datasets.
"""
import numpy as np
from sklearn.utils import murmurhash3_32
from spotlight.interactions import Interactions
def _index_or_none(array, shuffle_index):
if array is None:
return None
else:
return array[shuffle_index]
def shuffle_interactions(interactions,
random_state=None):
"""
Shuffle interactions.
Parameters
----------
interactions: :class:`spotlight.interactions.Interactions`
The interactions to shuffle.
random_state: np.random.RandomState, optional
The random state used for the shuffle.
Returns
-------
interactions: :class:`spotlight.interactions.Interactions`
The shuffled interactions.
"""
if random_state is None:
random_state = np.random.RandomState()
shuffle_indices = np.arange(len(interactions.user_ids))
random_state.shuffle(shuffle_indices)
return Interactions(interactions.user_ids[shuffle_indices],
interactions.item_ids[shuffle_indices],
ratings=_index_or_none(interactions.ratings,
shuffle_indices),
timestamps=_index_or_none(interactions.timestamps,
shuffle_indices),
weights=_index_or_none(interactions.weights,
shuffle_indices),
num_users=interactions.num_users,
num_items=interactions.num_items)
def random_train_test_split(interactions,
test_percentage=0.2,
random_state=None):
"""
Randomly split interactions between training and testing.
Parameters
----------
interactions: :class:`spotlight.interactions.Interactions`
The interactions to shuffle.
test_percentage: float, optional
The fraction of interactions to place in the test set.
random_state: np.random.RandomState, optional
The random state used for the shuffle.
Returns
-------
(train, test): (:class:`spotlight.interactions.Interactions`,
:class:`spotlight.interactions.Interactions`)
A tuple of (train data, test data)
"""
interactions = shuffle_interactions(interactions,
random_state=random_state)
cutoff = int((1.0 - test_percentage) * len(interactions))
train_idx = slice(None, cutoff)
test_idx = slice(cutoff, None)
train = Interactions(interactions.user_ids[train_idx],
interactions.item_ids[train_idx],
ratings=_index_or_none(interactions.ratings,
train_idx),
timestamps=_index_or_none(interactions.timestamps,
train_idx),
weights=_index_or_none(interactions.weights,
train_idx),
num_users=interactions.num_users,
num_items=interactions.num_items)
test = Interactions(interactions.user_ids[test_idx],
interactions.item_ids[test_idx],
ratings=_index_or_none(interactions.ratings,
test_idx),
timestamps=_index_or_none(interactions.timestamps,
test_idx),
weights=_index_or_none(interactions.weights,
test_idx),
num_users=interactions.num_users,
num_items=interactions.num_items)
return train, test
def user_based_train_test_split(interactions,
test_percentage=0.2,
random_state=None):
"""
Split interactions between a train and a test set based on
user ids, so that a given user's entire interaction history
is either in the train, or the test set.
Parameters
----------
interactions: :class:`spotlight.interactions.Interactions`
The interactions to shuffle.
test_percentage: float, optional
The fraction of users to place in the test set.
random_state: np.random.RandomState, optional
The random state used for the shuffle.
Returns
-------
(train, test): (:class:`spotlight.interactions.Interactions`,
:class:`spotlight.interactions.Interactions`)
A tuple of (train data, test data)
"""
if random_state is None:
random_state = np.random.RandomState()
minint = np.iinfo(np.uint32).min
maxint = np.iinfo(np.uint32).max
seed = random_state.randint(minint, maxint, dtype=np.int64)
in_test = ((murmurhash3_32(interactions.user_ids,
seed=seed,
positive=True) % 100 /
100.0) <
test_percentage)
in_train = np.logical_not(in_test)
train = Interactions(interactions.user_ids[in_train],
interactions.item_ids[in_train],
ratings=_index_or_none(interactions.ratings,
in_train),
timestamps=_index_or_none(interactions.timestamps,
in_train),
weights=_index_or_none(interactions.weights,
in_train),
num_users=interactions.num_users,
num_items=interactions.num_items)
test = Interactions(interactions.user_ids[in_test],
interactions.item_ids[in_test],
ratings=_index_or_none(interactions.ratings,
in_test),
timestamps=_index_or_none(interactions.timestamps,
in_test),
weights=_index_or_none(interactions.weights,
in_test),
num_users=interactions.num_users,
num_items=interactions.num_items)
return train, test
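# Example usage, as a sketch (assumes the spotlight package; the ids below
# are made up for illustration):
#
#   import numpy as np
#   from spotlight.interactions import Interactions
#   interactions = Interactions(np.array([0, 0, 1, 1, 2], dtype=np.int32),
#                               np.array([1, 2, 1, 3, 2], dtype=np.int32))
#   train, test = random_train_test_split(interactions, test_percentage=0.2)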
|
, {'primary_key': 'True'}),
'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Party']"})
},
'meps.delegation': {
'Meta': {'object_name': 'Delegation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'meps.delegationrole': {
'Meta': {'object_name': 'DelegationRole'},
'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'delegation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Delegation']"}),
'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'meps.group': {
'Meta': {'object_name': 'Group'},
'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'meps.groupmep': {
'Meta': {'object_name': 'GroupMEP'},
'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'end': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'meps.mep': {
'Meta': {'ordering': "['last_name']", 'object_name': 'MEP', '_ormbases': ['reps.Representative']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bxl_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bxl_building'", 'to': "orm['meps.Building']"}),
'bxl_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bxl_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bxl_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bxl_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bxl_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Committee']", 'through': "orm['meps.CommitteeRole']", 'symmetrical': 'False'}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Country']", 'through': "orm['meps.CountryMEP']", 'symmetrical': 'False'}),
'delegations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Delegation']", 'through': "orm['meps.DelegationRole']", 'symmetrical': 'False'}),
'ep_debates': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_declarations': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'ep_motions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_opinions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_questions': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_reports': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'ep_webpage': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Group']", 'through': "orm['meps.GroupMEP']", 'symmetrical': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Organization']", 'through': "orm['meps.OrganizationMEP']", 'symmetrical': 'False'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'representative_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['reps.Representative']", 'unique': 'True', 'primary_key': 'True'}),
'stg_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stg_building'", 'to': "orm['meps.Building']"}),
'stg_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'stg_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'stg_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'stg_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'stg_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'total_score': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True'})
},
'meps.organization': {
'Meta': {'object_name': 'Organization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'meps.organizationmep': {
'Meta': {'object_name': 'OrganizationMEP'},
'begin': ('django.db.models.fields.DateField', [], {}),
'end': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'meps.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"})
},
'reps.opinion': {
'Meta': {'object_name': 'Opinion'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1023'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'reps.opinionrep': {
'Meta': {'object_name': 'OpinionREP'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Opinion']"}),
'representative': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Representative']"})
},
'reps.party': {
'Meta': {'object_name': 'Party'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'reps.partyrepresentative': {
'Meta': {'object_name': 'PartyRepresentative'},
'current': ( |
# importing libraries:
import maya.cmds as cmds
import maya.mel as mel
# global variables to this module:
CLASS_NAME = "Arm"
TITLE = "m028_arm"
DESCRIPTION = "m029_armDesc"
ICON = "/Icons/dp_arm.png"
def Arm(dpAutoRigInst):
""" This function will create all guides needed to compose an arm.
"""
# check modules integrity:
guideDir = 'Modules'
checkModuleList = ['dpLimb', 'dpFinger']
checkResultList = dpAutoRigInst.startGuideModules(guideDir, "check", None, checkModuleList=checkModuleList)
if len(checkResultList) == 0:
# creating module instances:
armLimbInstance = dpAutoRigInst.initGuide('dpLimb', guideDir)
# change name to arm:
dpAutoRigInst.guide.Limb.editUserName(armLimbInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m028_arm'].capitalize())
# create finger instances:
indexFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
dpAutoRigInst.guide.Finger.editUserName(indexFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m032_index'])
middleFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
dpAutoRigInst.guide.Finger.editUserName(middleFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m033_middle'])
ringFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
dpAutoRigInst.guide.Finger.editUserName(ringFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m034_ring'])
pinkFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
dpAutoRigInst.guide.Finger.editUserName(pinkFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m035_pink'])
thumbFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir)
dpAutoRigInst.guide.Finger.editUserName(thumbFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m036_thumb'])
# edit arm limb guide:
armBaseGuide = armLimbInstance.moduleGrp
cmds.setAttr(armBaseGuide+".translateX", 2.5)
cmds.setAttr(armBaseGuide+".translateY", 16)
cmds.setAttr(armBaseGuide+".displayAnnotation", 0)
cmds.setAttr(armLimbInstance.cvExtremLoc+".translateZ", 7)
cmds.setAttr(armLimbInstance.radiusCtrl+".translateX", 1.5)
# edit finger guides:
fingerInstanceList = [indexFingerInstance, middleFingerInstance, ringFingerInstance, pinkFingerInstance, thumbFingerInstance]
fingerTZList = [0.6, 0.2, -0.2, -0.6, 0.72]
for n, fingerInstance in enumerate(fingerInstanceList):
cmds.setAttr(fingerInstance.moduleGrp+".translateX", 11)
cmds.setAttr(fingerInstance.moduleGrp+".translateY", 16)
cmds.setAttr(fingerInstance.moduleGrp+".translateZ", fingerTZList[n])
cmds.setAttr(fingerInstance.moduleGrp+".displayAnnotation", 0)
cmds.setAttr(fingerInstance.radiusCtrl+".translateX", 0.3)
cmds.setAttr(fingerInstance.annotation+".visibility", 0)
if n == len(fingerInstanceList)-1:
# correct uncommon values for the thumb guide:
cmds.setAttr(thumbFingerInstance.moduleGrp+".translateX", 10.1)
cmds.setAttr(thumbFingerInstance.moduleGrp+".rotateX", 60)
dpAutoRigInst.guide.Finger.changeJointNumber(thumbFingerInstance, 2)
cmds.setAttr(thumbFingerInstance.moduleGrp+".nJoints", 2)
# parent finger guide to the arm wrist guide:
cmds.parent(fingerInstance.moduleGrp, armLimbInstance.cvExtremLoc, absolute=True)
# select the armGuide_Base:
cmds.select(armBaseGuide)
else:
# error checking modules in the folder:
mel.eval('error \"'+ dpAutoRigInst.langDic[dpAutoRigInst.langName]['e001_GuideNotChecked'] +' - '+ (", ").join(checkResultList) +'\";')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-28 15:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('genevieve_client', '0003_variant_myvariant_dbsnp'),
]
operations = [
migrations.CreateModel(
name='OpenHumansUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('access_token', models.CharField(blank=True, max_length=30)),
('refresh_token', models.CharField(blank=True, max_length=30)),
('token_expiration', models.DateTimeField(null=True)),
('connected_id', models.CharField(max_length=30, unique=True)),
('openhumans_username', models.CharField(blank=True, max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='gennoteseditor',
name='gennotes_id',
),
migrations.RemoveField(
model_name='gennoteseditor',
name='genome_storage_enabled',
),
migrations.AddField(
model_name='gennoteseditor',
name='connected_id',
field=models.CharField(default=0, max_length=30, unique=True),
preserve_default=False,
),
]
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from distribution import *
import operator as o
from utils.lib import gt, lt, gte, lte, neq, eq
__author__ = "bigfatnoob"
def sample(values, size=100):
return np.random.choice(values, size=size)
def expected_value(values, size=1000):
means = []
for _ in range(1000):
samples = sample(values, int(size))
means.append(np.mean(samples))
return np.mean(means)
def standard_deviation(values):
return np.std(values)
def percentile(values, percent):
return np.percentile(values, percent)
def probability(values):
return sum([1 if v >= 1 else 0 for v in values]) / len(values)
def lambda_ev(*args):
return lambda x: expected_value(x, *args)
def lambda_std():
return lambda x: standard_deviation(x)
def lambda_percentile(*args):
return lambda x: percentile(x, *args)
def lambda_probability():
return lambda x: probability(x)
def to_int(func):
return lambda a, b: 1 if func(a, b) else 0
evaluations = {
"EV": lambda_ev,
"STD": lambda_std,
"PERCENTILE": lambda_percentile,
"PROBABILITY": lambda_probability
}
distributions = {
"constant": Constant,
"normal": Normal,
"normalCI": NormalCI, |
"uniform": Uniform,
"random": Random,
"exp": Exponential,
"binomial": Binomial,
"geometric": Geometric,
"triangular": Triangular
}
operations = {
"+": o.add,
"-": o.sub,
"*": o.mul,
"/": o.div,
"|": max,
"&": o.mul,
">": to_int(gt),
| "<": to_int(lt),
">=": to_int(gte),
"<=": to_int(lte),
"==": to_int(eq),
"!=": to_int(neq)
}
|
#!/usr/bin/env python
"""Distutils setup file, used to install or test 'setuptools'"""
import textwrap
import sys
try:
import setuptools
except ImportError:
sys.stderr.write("Distribute 0.7 may only upgrade an existing "
"Distribute 0.6 installation")
raise SystemExit(1)
long_description = textwrap.dedent("""
Distribute - legacy package
This package is a simple compatibility layer that installs Setuptools 0.7+.
""").lstrip()
setup_params = dict(
name="distribute",
version='0.7.3',
description="distribute legacy wrapper",
author="The fellowship of the packaging",
author_email="[email protected]",
license="PSF or ZPL",
long_description=long_description,
keywords="CPAN PyPI distutils eggs package management",
url="http://packages.python.org/distribute",
zip_safe=True,
classifiers=textwrap.dedent("""
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: Python Software Foundation License
License :: OSI Approved :: Zope Public License
Operating System :: OS Independent
Programming Language :: Python :: 2.4
Programming Language :: Python :: 2.5
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.1
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Packaging
Topic :: System :: Systems Administration
Topic :: Utilities
""").strip().splitlines(),
install_requires=[
'setuptools>=0.7',
],
)
if __name__ == '__main__':
setuptools.setup(**setup_params)
|
from django.core.management.base import BaseCommand, CommandError
from tweets.tasks import stream
# The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
# Show this when the user types help
help = "My twitter stream command"
# A command must define handle()
def handle(self, *args, **options):
stream()
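# Usage sketch: Django exposes the command under the name of this module's
# file inside tweets/management/commands/ (the file name "stream_tweets.py"
# is an assumption for illustration):
#
#   python manage.py stream_tweets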
|
#!/usr/bin/env python
# coding=utf-8
"""
Created on April 15 2017
@author: yytang
"""
from scrapy import Selector
from libs.misc import get_spider_name_from_domain
from libs.polish import polish_title, polish_subtitle, polish_content
from novelsCrawler.spiders.novelSpider import NovelSpider
class PiaotianSpider(NovelSpider):
"""
classdocs
example: https://www.piaotian.com/html/9/9459/index.html
"""
allowed_domains = ['www.piaotian.com']
name = get_spider_name_from_domain(allowed_domains[0])
# custom_settings = {
# 'DOWNLOAD_DELAY': 0.3,
# }
def parse_title(self, response):
sel = Selector(response)
title = sel.xpath('//h1/text()').extract()[0]
title = polish_title(title, self.name)
return title
def parse_episodes(self, response):
sel = Selector(response)
episodes = []
subtitle_selectors = sel.xpath('//div[@class="centent"]/ul/li/a')
for page_id, subtitle_selector in enumerate(subtitle_selectors):
subtitle_url = subtitle_selector.xpath('@href').extract()[0]
subtitle_url = response.urljoin(subtitle_url.strip())
subtitle_name = subtitle_selector.xpath('text()').extract()[0]
subtitle_name = polish_subtitle(subtitle_name)
episodes.append((page_id, subtitle_name, subtitle_url))
return episodes
def parse_content(self, response):
# sel = Selector(response)
# content = sel.xpath('//div[@id="content"]/p/text()').extract()
# content = polish_content(content)
html = str(response.body.decode('GBK'))
pattern = r' (.*)'
import re
m = re.search(pattern, html)
if m:
content = m.group(1)
else:
content = ''
content = content.replace('<br /><br /> ', '\n\n')
return content
|
a, b = <warning descr="Need more values to unpack">None</warning>
"""
@author: dhoomake | thu
"""
from __future__ import absolute_import | , unicode_literals |
#!/usr/bin/env python3
from setuptools import setup, Extension
#
#CXX_FLAGS = "-O3 -std=gnu++11 -Wall -Wno-comment"
#
## List of C/C++ sources that will conform the library
#sources = [
#
# "andrnx/clib/android.c",
#
#]
setup(name="andrnx",
version="0.1",
description="Package to convert from GNSS logger to Rinex files",
author='Miquel Garcia',
author_email='[email protected]',
url='https://www.rokubun.cat',
packages=['andrnx'],
test_suite="andrnx.test",
scripts=['bin/gnsslogger_to_rnx'])
|
class DrawingDimensioningWorkbench (Workbench):
# Icon generated by converting linearDimension.svg to xpm format using Gimp
Icon = '''
/* XPM */
static char * linearDimension_xpm[] = {
"32 32 10 1",
" c None",
". c #000000",
"+ c #0008FF",
"@ c #0009FF",
"# c #000AFF",
"$ c #00023D",
"% c #0008F7",
"& c #0008EE",
"* c #000587",
"= c #000001",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". +@@ + .",
". @+@@+ +@@+@ .",
". +@+@@@@@@ @@@@@@@# .",
"$%@@@@@@@@@+@@@@@@@@@@@@@@@@@@&$",
". #@@@@@@@@ #+@@@@@@@@*=",
". @+@@+ +@@@@@ .",
". +@ #@++ .",
". # .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". ."};
'''
MenuText = 'Drawing Dimensioning'
def Initialize(self):
import importlib, os
from dimensioning import __dir__, debugPrint, iconPath
import linearDimension
import linearDimension_stack
import deleteDimension
import circularDimension
import grabPointAdd
import textAdd
import textEdit
import textMove
import escapeDimensioning
import angularDimension
import radiusDimension
import centerLines
import noteCircle
import toleranceAdd
commandslist = [
'dd_linearDimension', #where dd is short-hand for drawing dimensioning
'dd_linearDimensionStack',
'dd_circularDimension',
'dd_radiusDimension',
'dd_angularDimension',
'dd_centerLines',
'dd_centerLine',
'dd_noteCircle',
'dd_grabPoint',
'dd_addText',
'dd_editText',
'dd_moveText',
'dd_addTolerance',
'dd_deleteDimension',
'dd_escapeDimensioning',
]
self.appendToolbar('Drawing Dimensioning', commandslist)
import unfold
import unfold_bending_note
import unfold_export_to_dxf
unfold_cmds = [
'dd_unfold',
'dd_bendingNote',
]
if hasattr(os,'uname') and os.uname()[0] == 'Linux' : #this command only works on Linux systems
unfold_cmds.append('dd_exportToDxf')
self.appendToolbar( 'Drawing Dimensioning Folding', unfold_cmds )
import weldingSymbols
if int( FreeCAD.Version()[1] ) > 15 and int( FreeCAD.Version()[2].split()[0] ) > 5165:
weldingCommandList = ['dd_weldingGroupCommand']
else:
weldingCommandList = weldingSymbols.weldingCmds
self.appendToolbar('Drawing Dimensioning Welding Symbols', weldingCommandList)
self.appendToolbar('Drawing Dimensioning Help', [ 'dd_help' ])
FreeCADGui.addIconPath(iconPath)
FreeCADGui.addPreferencePage( os.path.join( __dir__, 'Resources', 'ui', 'drawing_dimensioing_prefs-base.ui'),'Drawing Dimensioning' )
Gui.addWorkbench(DrawingDimensioningWorkbench())
|
# Copyright (c) 2009-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from tortoisehg.util import hglib, patchctx
from tortoisehg.hgqt.qtlib import geticon, getoverlaidicon
from PyQt4.QtCore import *
from PyQt4.QtGui import *
nullvariant = QVariant()
def getSubrepoIcoDict():
'Return a dictionary mapping each subrepo type to the corresponding icon'
_subrepoType2IcoMap = {
'hg': 'hg',
'git': 'thg-git-subrepo',
'svn': 'thg-svn-subrepo',
'hgsubversion': 'thg-svn-subrepo',
'empty': 'hg'
}
icOverlay = geticon('thg-subrepo')
subrepoIcoDict = {}
for stype in _subrepoType2IcoMap:
ic = geticon(_subrepoType2IcoMap[stype])
ic = getoverlaidicon(ic, icOverlay)
subrepoIcoDict[stype] = ic
return subrepoIcoDict
class HgFileListModel(QAbstractTableModel):
"""
Model used for listing (modified) files of a given Hg revision
"""
showMessage = pyqtSignal(QString)
def __init__(self, parent):
QAbstractTableModel.__init__(self, parent)
self._boldfont = parent.font()
self._boldfont.setBold(True)
self._ctx = None
self._files = []
self._filesdict = {}
self._fulllist = False
self._subrepoIcoDict = getSubrepoIcoDict()
@pyqtSlot(bool)
def toggleFullFileList(self, value):
self._fulllist = value
self.loadFiles()
self.layoutChanged.emit()
def __len__(self):
return len(self._files)
def rowCount(self, parent=None):
return len(self)
def columnCount(self, parent=None):
return 1
def file(self, row):
return self._files[row]['path']
def setContext(self, ctx):
reload = False
if not self._ctx:
reload = True
elif self._ctx.rev() is None:
reload = True
elif ctx.thgid() != self._ctx.thgid():
reload = True
if reload:
self._ctx = ctx
self.loadFiles()
self.layoutChanged.emit()
def fileFromIndex(self, index):
if not index.isValid() or index.row()>=len(self) or not self._ctx:
return None
row = index.row()
return self._files[row]['path']
def dataFromIndex(self, index):
if not index.isValid() or index.row()>=len(self) or not self._ctx:
return None
row = index.row()
return self._files[row]
def indexFromFile(self, filename):
if filename in self._filesdict:
row = self._files.index(self._filesdict[filename])
return self.index(row, 0)
return QModelIndex()
def _buildDesc(self, parent):
files = []
ctxfiles = self._ctx.files()
modified, added, removed = self._ctx.changesToParent(parent)
ismerge = bool(self._ctx.p2())
# Add the list of modified subrepos to the top of the list
if not isinstance(self._ctx, patchctx.patchctx):
if ".hgsubstate" in ctxfiles or ".hgsub" in ctxfiles:
from mercurial import subrepo
# Add the list of modified subrepos
for s, sd in self._ctx.substate.items():
srev = self._ctx.substate.get(s, subrepo.nullstate)[1]
stype = self._ctx.substate.get(s, subrepo.nullstate)[2]
sp1rev = self._ctx.p1().substate.get(s, subrepo.nullstate)[1]
sp2rev = ''
if ismerge:
sp2rev = self._ctx.p2().substate.get(s, subrepo.nullstate)[1]
if srev != sp1rev or (sp2rev != '' and srev != sp2rev):
wasmerged = ismerge and s in ctxfiles
files.append({'path': s, 'status': 'S', 'parent': parent,
'wasmerged': wasmerged, 'stype': stype})
# Add the list of missing subrepos
subreposet = set(self._ctx.substate.keys())
subrepoparent1set = set(self._ctx.p1().substate.keys())
missingsubreposet = subrepoparent1set.difference(subreposet)
for s in missingsubreposet:
wasmerged = ismerge and s in ctxfiles
stype = self._ctx.p1().substate.get(s, subrepo.nullstate)[2]
files.append({'path': s, 'status': 'S', 'parent': parent,
'wasmerged': wasmerged, 'stype': stype})
if self._fulllist and ismerge:
func = lambda x: True
else:
func = lambda x: x in ctxfiles
for lst, flag in ((added, 'A'), (modified, 'M'), (removed, 'R')):
for f in filter(func, lst):
wasmerged = ismerge and f in ctxfiles
f = self._ctx.removeStandin(f)
files.append({'path': f, 'status': flag, 'parent': parent,
'wasmerged': wasmerged})
return files
def loadFiles(self):
self._files = []
try:
self._files = self._buildDesc(0)
if bool(self._ctx.p2()):
_paths = [x['path'] for x in self._files]
_files = self._buildDesc(1)
self._files += [x for x in _files if x['path'] not in _paths]
except EnvironmentError, e:
self.showMessage.emit(hglib.tounicode(str(e)))
self._filesdict = dict([(f['path'], f) for f in self._files])
def data(self, index, role):
if not index.isValid() or index.row()>=len(self) or not self._ctx:
return nullvariant
if index.column() != 0:
return nullvariant
row = index.row()
column = index.column()
current_file_desc = self._files[row]
current_file = current_file_desc['path']
if role in (Qt.DisplayRole, Qt.ToolTipRole):
return QVariant(hglib.tounicode(current_file))
elif role == Qt.DecorationRole:
if self._fulllist and bool(self._ctx.p2()):
if current_file_desc['wasmerged']:
icn = geticon('thg-file-merged')
elif current_file_desc['parent'] == 0:
icn = geticon('thg-file-p0')
elif current_file_desc['parent'] == 1:
icn = geticon('thg-file-p1')
return QVariant(icn.pixmap(20,20))
elif current_file_desc['status'] == 'A':
return QVariant(geticon('fileadd'))
elif current_file_desc['status'] == 'R':
return QVariant(geticon('filedelete'))
elif current_file_desc['status'] == 'S':
stype = current_file_desc.get('stype', 'hg')
return QVariant(self._subrepoIcoDict[stype])
#else:
# return QVariant(geticon('filemodify'))
elif role == Qt.FontRole:
if current_file_desc['wasmerged']:
return QVariant(self._boldfont)
else:
return nullvariant
|
"""Maya initialisation for Mindbender pipeline"""
from maya import cmds
def setup():
assert __import__("pyblish_maya").is_setup(), (
"pyblish-mindbender dep | ends on pyblish_maya which has not "
"yet | been setup. Run pyblish_maya.setup()")
from pyblish import api
api.register_gui("pyblish_lite")
from mindbender import api, maya
api.install(maya)
# Allow time for dependencies (e.g. pyblish-maya)
# to be installed first.
cmds.evalDeferred(setup)
|
""":mod:`kinsumer.checkpointer` --- Persisting positions for Kinesis shards
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import abc
import json
import os.path
from typing import Optional, Dict
class Checkpointer(abc.ABC, object):
"""Checkpointer is the interface for persisting positions for Kinesis shards
"""
@abc.abstractmethod
def get_checkpoints(self) -> Dict[str, str]:
"""Get a dictionary whose keys are all the shard ids we are aware of,
and whose values are the sequence id of the last record processed for
its shard
"""
@abc.abstractmethod
def checkpoint(self, shard_id: str, sequence: str) -> None:
"""Persist the sequence number for a given shard"""
@abc.abstractmethod
def get_checkpoint(self, shard_id: str) -> Optional[str]:
" | ""Get the sequence number of the last successfully processed record"""
class InMemoryCheckpointer(Checkpointer):
def __init__(self) -> None:
self._checkpoints = {}
def get_checkpoints(self) -> Dict[str, str]:
return self._checkpoints.copy()
def checkpoint(self, shard_id: str, sequence: str) -> None:
self._checkpoints[shard_id] = sequence
def get_checkpoint(self, shard_id: str) -> Optional[str]:
return self._checkpoints.get(shard_id)
class FileCheckpointer(InMemoryCheckpointer):
def __init__(self, file: str) -> None:
super().__init__()
self.file = os.path.expanduser(file)
if os.path.exists(self.file):
with open(self.file, 'rb') as f:
self._checkpoints = json.load(f)
def checkpoint(self, shard_id: str, sequence: str) -> None:
super().checkpoint(shard_id, sequence)
with open(self.file, 'wb') as f:
f.write(json.dumps(self._checkpoints, ensure_ascii=False).encode())
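# Illustrative round trip with the file-backed implementation (the path,
# shard id and sequence number below are made up for the example):
#
#   cp = FileCheckpointer('~/.kinsumer/checkpoints.json')
#   cp.checkpoint('shardId-000000000000', '49590338271490256608559692538361571')
#   assert cp.get_checkpoint('shardId-000000000000') == '49590338271490256608559692538361571'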
|
class ParametrizedError(Exception):
def __init__(self, problem, invalid):
self.problem = str(problem)
self.invalid = str(invalid)
def __str__(self):
return '--- Error: {0}\n--- Caused by: {1}'.format(self.problem, self.invalid)
class InvalidToken(ParametrizedError):
pass
class ToneError(ParametrizedError):
pass
class IntervalError(ParametrizedError):
pass
class TriolaError(ParametrizedError):
pass
class ConfigError(ParametrizedError):
pass
class ComposingError(ParametrizedError):
pass
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class TestFailoverStatus(object):
def test_get_status(self, request, mgmt_root):
failover_status = mgmt_root.tm.cm.failover_status
assert failover_status._meta_data['uri'].endswith(
"/mgmt/tm/cm/failover-status/")
failover_status.refresh()
des =\
(failover_status.entries['https://localhost/mgmt/tm/cm/failover-status/0']
['nestedStats']
['entries']
['status']
['description'])
assert des == "ACTIVE"
|
# third party
import numpy as np
import pytest
# syft absolute
from syft.core.tensor.smpc.share_tensor import ShareTensor
@pytest.mark.smpc
def test_bit_extraction() -> None:
share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
share.child = data
exp_res1 = np.array([[False, False], [True, False]], dtype=np.bool_)
res = share.bit_extraction(31).child
assert (res == exp_res1).all()
exp_res2 = np.array([[True, False], [False, False]], dtype=np.bool_)
res = share.bit_extraction(2).child
assert (res == exp_res2).all()
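# For reference: bit 31 is the sign bit of an int32, so only -54 yields True
# in exp_res1; bit 2 (value 4) is set in 21 (0b10101) but not in 32
# (0b100000), -54 (two's complement ...11001010) or 89 (0b1011001), which
# matches exp_res2.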
@pytest.mark.smpc
def test_bit_extraction_exception() -> None:
share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
share.child = data
with pytest.raises(Exception):
share >> 33
with pytest.raises(Exception):
share >> -1
|
/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Mm/Mm_Ensembl_exon.txt'
reference_rows=0
if '.gtf' in refExonCoordinateFile: firstLine = False
else: firstLine = True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
reference_rows+=1
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
if chr_status == False:
chr = string.replace(chr,'chr','')
o.write(string.join([chr,start,stop,geneID+':'+exon,'',strand],'\t')+'\n')
start = int(start); stop = int(stop)
#geneID = string.split(exon,':')[0]
splicesite_db[chr,start]=geneID
splicesite_db[chr,stop]=geneID
if 'I' in exon:
try: introns[geneID].append([start,stop])
except Exception: introns[geneID] = [[start,stop]]
files = getFiles(directory)
for file in files:
firstLine=True
if 'junction' in file and '.bed' in file:
for line in open(directory+'/'+file,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'12', '6998470', '6998522', 'ENSG00000111671:E1.1_ENSE00001754003', '0', '-'
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
if strand == '-':
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
key = chr,exon1_stop,exon2_start
if (chr,exon1_stop) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon2_start) in splicesite_db: ### only include splice sites where one site is known
geneID = splicesite_db[(chr,exon2_start)]
novel_db[chr,exon1_stop,strand] = exon1_start,geneID,5
real_splicesites[chr,exon2_start]=None
elif (chr,exon2_start) not in splicesite_db: ### record the splice site and position of the max read
if (chr,exon1_stop) in splicesite_db: ### only include splice sites where one site is known
#if 121652702 ==exon2_start:
#print chr, exon1_start,exon1_stop,exon2_start,exon2_stop, strand;sys.exit()
geneID = splicesite_db[(chr,exon1_stop)]
novel_db[chr,exon2_start,strand] = exon2_stop,geneID,3
real_splicesites[chr,exon1_stop]=None
else:
real_splicesites[chr,exon1_stop]=None
real_splicesites[chr,exon2_start]=None
print len(novel_db), 'novel splice sites and', len(real_splicesites), 'known splice sites.'
gene_organized={}
for (chr,pos1,strand) in novel_db:
pos2,geneID,type = novel_db[(chr,pos1,strand)]
try: gene_organized[chr,geneID,strand].append([pos1,pos2,type])
except Exception: gene_organized[chr,geneID,strand] = [[pos1,pos2,type]]
def intronCheck(geneID,coords):
### see if the coordinates are within a given intron
try:
for ic in introns[geneID]:
if withinQuery(ic,coords):
return True
except Exception:
pass
def withinQuery(ls1,ls2):
imax = max(ls1)
imin = min(ls1)
qmax = max(ls2)
qmin = min(ls2)
if qmin >= imin and qmax <= imax:
return True
else:
return False
### Compare the novel splice site locations in each gene
added=[]
for (chr,geneID,strand) in gene_organized:
gene_organized[(chr,geneID,strand)].sort()
if strand == '-':
gene_organized[(chr,geneID,strand)].reverse()
i=0
set = gene_organized[(chr,geneID,strand)]
for (pos1,pos2,type) in set:
k = [pos1,pos2]
annotation='novel'
if i==0 and type == 3:
if len(set)>1:
if set[i+1][-1]==5:
l = [set[i+1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
elif type == 5:
if set[i-1][-1]==3:
l = [set[i-1][0],pos1]
if (max(l)-min(l))<300 and intronCheck(geneID,l):
k=l
#print chr,k
annotation='novel-paired'
k.sort(); i+=1
if k not in added:
values = string.join([chr,str(k[0]),str(k[1]),geneID+':'+annotation,'',strand],'\t')+'\n'
added.append(k)
o.write(values)
o.close()
if __name__ == '__main__':
import multiprocessing as mlp
refExonCoordinateFile = ''
outputExonCoordinateRefBEDfile = ''
#bam_dir = "H9.102.2.6.bam"
#outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed'
################ Command-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a directory containing BAM files as input in the command-line"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --g /Users/me/ReferenceExonCoordinates/Hs_Ensembl_exon_hg19.txt --r /Users/me/ExonBEDRef/Hs_Ensembl_exon-cancer_hg19.bed --a exon --a junction --a reference"
print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --a junction"
sys.exit()
else:
analysisType = []
useMultiProcessing=False
options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','a=','m='])
for opt, arg in options:
if opt == '--i': bam_dir=arg
elif opt == '--g': refExonCoordinateFile=arg
elif opt == '--r': outputExonCoordinateRefBEDfile=arg
elif opt == '--a': analysisType.append(arg) ### options are: all, junction, exon, reference
elif opt == '--m': ### Run each BAM file on a different processor
if arg == 'yes': useMultiProcessing=True
elif arg == 'True': useMultiProcessing=True
else: useMultiProcessing=False
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
if len(analysisType) == 0:
analysisType = ['exon','junction','reference']
try:
refExonCoordinateFile = refExonCoordinateFile
outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile
except Exception:
print 'Please provide an exon coordinate text file using the option --g and an output coordinate file path (--r) to generate exon.bed files'
analysisType = ['junction']
refExonCoordinateFile = ''
outputExonCoordinateRefBEDfile = ''
#!/usr/bin/env python3
from django.shortcuts import render
# Create your views here.
from CnbetaApis.datas.Models import *
from CnbetaApis.datas.get_letv_json import get_letv_json
from CnbetaApis.datas.get_youku_json import get_youku_json
from django.views.decorators.csrf import csrf_exempt
from django.http import *
from datetime import timezone, timedelta
import json
def getrelate(ids, session):
relateds = session.query(Article).filter(Article.id.in_(ids))
relateds_arr = []
for related in relateds:
relateds_arr.append({
'id': related.id,
'title': related.title,
'url': related.url,
})
return relateds_arr
def get_home_data(request):
if not request.method == 'GET':
return HttpResponseNotAllowed(['GET'])
lastID = request.GET.get('lastid')
limit = request.GET.get('limit') or 20
session = DBSession()
datas = None
if lastID:
datas = session.query(Article).order_by(desc(Article.id)).filter(and_(Article.introduction != None, Article.id < lastID)).limit(limit).all()
else:
datas = session.query(Article).order_by(desc(Article.id)).limit(limit).all()
values = []
for data in datas:
values.append({
'id': data.id,
'title': data.title,
'url': data.url,
'source': data.source,
'imgUrl': data.imgUrl,
'introduction': data.introduction,
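# the stored datetime is naive UTC+8; attach that zone and emit a UTC epoch timestamp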
'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
'related': getrelate(data.related.split(','), session),
'readCount': data.readCount,
'opinionCount': data.opinionCount,
})
session.close()
return JsonResponse({"result": values})
def get_article_content(request):
if request.method != 'GET':
return HttpResponseNotAllowed(['GET'])
article_id = request.GET.get('id')
session = DBSession()
datas = session.query(Article).filter(Article.id == article_id).all()
if not len(datas):
raise Http404('Article does not exist')
data = datas[0]
result = {'result': {
'id': data.id,
'title': data.title,
'url': data.url,
'imgUrl': data.imgUrl,
'source': data.source,
'introduction': data.introduction,
'createTime': data.createTime.replace(tzinfo=timezone(timedelta(hours=8))).astimezone(timezone.utc).timestamp(),
'related': getrelate(data.related.split(','), session),
'readCount': data.readCount,
'opinionCount': data.opinionCount,
'content': json.loads(data.content),
}}
session.close()
return JsonResponse(result)
@csrf_exempt
def get_video_realUrl(req):
if req.method != 'POST':
return HttpResponseNotAllowed(['POST'])
source_url = req.POST.get('url')
source_type = req.POST.get('type')
if source_type == "youku":
source_url = get_youku_json(source_url)
elif source_type == "letv":
source_url = get_letv_json(source_url)
else:
raise Http404('Article does not exist')
return JsonResponse({"result": source_url})
ET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_TRACK,
DEVICE_SCHEMA,
SERVICE_SCAN_CALENDARS,
do_setup,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from tests.common import async_mock_service
GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"}
TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar"
TEST_ENTITY_NAME = "We are, we are, a... Test Calendar"
TEST_EVENT = {
"summary": "Test All Day Event",
"start": {},
"end": {},
"location": "Test Cases",
"description": "test event",
"kind": "calendar#event",
"created": "2016-06-23T16:37:57.000Z",
"transparency": "transparent",
"updated": "2016-06-24T01:57:21.045Z",
"reminders": {"useDefault": True},
"organizer": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"sequence": 0,
"creator": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"id": "_c8rinwq863h45qnucyoi43ny8",
"etag": '"2933466882090000"',
"htmlLink": "https://www.google.com/calendar/event?eid=*******",
"iCalUID": "[email protected]",
"status": "confirmed",
}
def get_calendar_info(calendar):
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_TRACK: calendar["track"],
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: slugify(calendar["summary"]),
}
],
}
)
return calendar_info
@pytest.fixture(autouse=True)
def mock_google_setup(hass, test_calendar):
"""Mock the google set up functions."""
hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}}))
calendar = get_calendar_info(test_calendar)
calendars = {calendar[CONF_CAL_ID]: calendar}
patch_google_auth = patch(
"homeassistant.components.google.do_authentication", side_effect=do_setup
)
patch_google_load = patch(
"homeassistant.components.google.load_config", return_value=calendars
)
patch_google_services = patch("homeassistant.components.google.setup_services")
async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS)
with patch_google_auth, patch_google_load, patch_google_services:
yield
@pytest.fixture(autouse=True)
def mock_http(hass):
"""Mock the http component."""
hass.http = Mock()
@pytest.fixture(autouse=True)
def set_time_zone():
"""Set the time zone for the tests."""
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina"))
yield
dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
@pytest.fixture(name="google_service")
def mock_google_service():
"""Mock google service."""
patch_google_service = patch(
"homeassistant.components.google.calendar.GoogleCalendarService"
)
with patch_google_service as mock_service:
yield mock_service
async def test_all_day_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7)
end_event = week_from_today + dt_util.dt.timedelta(days=1)
event = copy.deepcopy(TEST_EVENT)
start = week_from_today.isoformat()
end = end_event.isoformat()
event["start"]["date"] = start
event["end"]["date"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_N | AME,
"message": event["summary"],
"all_day": True,
"offset_reached": False,
"start_time": week_from_today.strftime(DATE_STR_FORMA | T),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_future_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() + dt_util.dt.timedelta(minutes=30)
end_event = one_hour_from_now + dt_util.dt.timedelta(minutes=60)
start = one_hour_from_now.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": one_hour_from_now.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() - dt_util.dt.timedelta(minutes=30)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_offset_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() + dt_util.dt.timedelta(minutes=14)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event_summary = "Test Event in Progress"
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
event["summary"] = "{} !!-15".format(event_summary)
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": False,
"offset_reached": True,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
@pytest.mark.skip
async def test_all_day_offset_in_progress_event(hass, mock_next_event):
" |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************
**espressopp.Int3D**
********************
.. function:: espressopp.__Int3D(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.__Int3D.x(v, [0)
:param v:
:param [0:
:type v:
:type [0:
:rtype:
.. function:: espressopp.__Int3D.y(v, [1)
:param v:
:param [1:
:type v:
:type [1:
:rtype:
.. function:: espressopp.__Int3D.z(v, [2)
:param v:
:param [2:
:type v:
:type [2:
:rtype:
.. function:: espressopp.toInt3DFromVector(\*args)
:param \*args:
:type \*args:
.. function:: espressopp.toInt3D(\*args)
:param \*args:
:type \*args:
"""
from _espressopp import Int3D
from espressopp import esutil
# This injects additional methods into the Int3D class and pulls it
# into this module
class __Int3D(Int3D) :
__metaclass__ = esutil.ExtendBaseClass
__originit = Int3D.__init__
def __init__(self, *args):
if len(args) == 0:
x = y = z = 0
elif len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Int3D):
x = arg0.x
y = arg0.y
z = arg0.z
# test whether the argument is iterable and has 3 elements
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
x, y, z = arg0
elif isinstance(arg0, int):
x = y = z = arg0
else :
raise TypeError("Cannot initialize Int3D from %s" % (args))
elif len(args) == 3 :
x, y, z = args
else :
raise TypeError("Cannot initialize Int3D from %s" % (args))
return self.__originit(x, y, z)
# create setters and getters
@property
def x(self): return self[0]
@x.setter
def x(self, v): self[0] = v
@property
def y(self) : return self[1]
@y.setter
def y(self, v) : self[1] = v
@property
def z(self) : return self[2]
@z.setter
def z(self, v) : self[2] = v
# string conversion
def __str__(self) :
return str((self[0], self[1], self[2]))
def __repr__(self) :
return 'Int3D' + str(self)
def toInt3DFromVector(*args):
"""Try to convert the arguments to a Int3D.
This function will only convert to a Int3D if x, y and z are
specified."""
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Int3D):
return arg0
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
return Int3D(*args)
elif len(args) == 3:
return Int3D(*args)
raise TypeError("Specify x, y and z.")
def toInt3D(*args):
"""Try to convert the arguments to a Int3D, returns the argument,
if it is already a Int3D."""
if len(args) == 1 and isinstance(args[0], Int3D):
return args[0]
else:
return Int3D(*args)
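# usage sketch (all equivalent):
#   toInt3D(1, 2, 3)    -> Int3D(1, 2, 3)
#   toInt3D((1, 2, 3))  -> Int3D(1, 2, 3)
#   toInt3D(Int3D(1, 2, 3)) is returned unchanged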
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.segments`
"""
import pytest
import numpy
from matplotlib import rcParams
from matplotlib.colors import ColorConverter
from matplotlib.collections import PatchCollection
from ...segments import (Segment, SegmentList, SegmentListDict,
DataQualityFlag, DataQualityDict)
from ...time import to_gps
from .. import SegmentAxes
from ..segments import SegmentRectangle
from .test_axes import TestAxes as _TestAxes
# extract color cycle
COLOR_CONVERTER = ColorConverter()
COLOR_CYCLE = rcParams['axes.prop_cycle'].by_key()['color']
COLOR0 = COLOR_CONVERTER.to_rgba(COLOR_CYCLE[0])
class TestSegmentAxes(_TestAxes):
AXES_CLASS = SegmentAxes
@staticmethod
@pytest.fixture()
def segments():
return SegmentList([Segment(0, 3), Segment(6, 7)])
@staticmethod
@pytest.fixture()
def flag():
known = SegmentList([Segment(0, 3), Segment(6, 7)])
active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)])
return DataQualityFlag(name='Test segments', known=known,
active=active)
def test_plot_flag(self, ax, flag):
c = ax.plot_flag(flag)
assert c.get_label() == flag.texname
assert len(ax.collections) == 2
assert ax.collections[0] is c
flag.isgood = False
c = ax.plot_flag(flag)
assert tuple(c.get_facecolors()[0]) == (1., 0., 0., 1.)
c = ax.plot_flag(flag, known={'facecolor': 'black'})
c = ax.plot_flag(flag, known='fancy')
def test_plot_dqflag(self, ax, flag):
with pytest.deprecated_call():
ax.plot_dqflag(flag)
assert ax.collections # make sure it plotted something
def test_plot_dict(self, ax, flag):
dqd = DataQualityDict()
dqd['a'] = flag
dqd['b'] = flag
colls = ax.plot_dict(dqd)
assert len(colls) == len(dqd)
assert all(isinstance(c, PatchCollection) for c in colls)
assert colls[0].get_label() == 'a'
assert colls[1].get_label() == 'b'
colls = ax.plot_dict(dqd, label='name')
assert colls[0].get_label() == 'Test segments'
colls = ax.plot_dict(dqd, label='anything')
assert colls[0].get_label() == 'anything'
def test_plot_dqdict(self, ax, flag):
with pytest.deprecated_call():
ax.plot_dqdict(DataQualityDict(a=flag))
def test_plot_segmentlist(self, ax, segments):
c = ax.plot_segmentlist(segments)
assert isinstance(c, PatchCollection)
assert numpy.isclose(ax.dataLim.x0, 0.)
assert numpy.isclose(ax.dataLim.x1, 7.)
assert len(c.get_paths()) == len(segments)
assert ax.get_epoch() == segments[0][0]
# test y
p = ax.plot_segmentlist(segments).get_paths()[0].get_extents()
assert p.y0 + p.height/2. == 1.
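# without an explicit y, the first segment list is centred on y=1; passing y=8 overrides it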
p = ax.plot_segmentlist(segments, y=8).get_paths()[0].get_extents()
assert p.y0 + p.height/2. == 8.
# test kwargs
c = ax.plot_segmentlist(segments, label='My segments',
rasterized=True)
assert c.get_label() == 'My segments'
assert c.get_rasterized() is True
# test collection=False
c = ax.plot_segmentlist(segments, collection=False, label='test')
assert isinstance(c, list)
assert not isinstance(c, PatchCollection)
assert c[0].get_label() == 'test'
assert c[1].get_label() == ''
assert len(ax.patches) == len(segments)
# test empty
c = ax.plot_segmentlist(type(segments)())
def test_plot_segmentlistdict(self, ax, segments):
sld = SegmentListDict()
sld['TEST'] = segments
ax.plot(sld)
def test_plot(self, ax, segments, flag):
dqd = DataQualityDict(a=flag)
ax.plot(segments)
ax.plot(flag)
ax.plot(dqd)
ax.plot(flag, segments, dqd)
def test_insetlabels(self, ax, segments):
ax.plot(segments)
ax.set_insetlabels(True)
def test_fmt_data(self, ax):
# just check that the LIGOTimeGPS repr is in place
value = 1234567890.123
assert ax.format_xdata(value) == str(to_gps(value))
# -- disable tests from upstream
def test_imshow(self):
return NotImplemented
def test_segmentrectangle():
patch = SegmentRectangle((1.1, 2.4), 10)
assert patch.get_xy() == (1.1, 9.6)
assert numpy.isclose(patch.get_height(), 0.8)
assert numpy.isclose(patch.get_width(), 1.3)
assert patch.get_facecolor() == COLOR0
# check kwarg passing
patch = SegmentRectangle((1.1, 2.4), 10, facecolor='red')
assert patch.get_facecolor() == COLOR_CONVERTER.to_rgba('red')
# check valign
patch = SegmentRectangle((1.1, 2.4), 10, valign='top')
assert patch.get_xy() == (1.1, 9.2)
patch = SegmentRectangle((1.1, 2.4), 10, valign='bottom')
assert patch.get_xy() == (1.1, 10.0)
with pytest.raises(ValueError):
patch = SegmentRectangle((0, 1), 0, valign='blah')
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Qualifications:
def __init__(self, requirements=None):
if requirements is None:
requirements = []
self.requirements = requirements
def add(self, req):
self.requirements.append(req)
def get_as_params(self):
params = {}
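# MTurk accepts at most 10 qualification requirements per HIT, numbered from 1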
assert(len(self.requirements) <= 10)
for n, req in enumerate(self.requirements):
reqparams = req.get_as_params()
for rp in reqparams:
params['QualificationRequirement.%s.%s' % ((n+1),rp) ] = reqparams[rp]
return params
class Requirement(object):
"""
Representation of a single requirement
"""
def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False):
self.qualification_type_id = qualification_type_id
self.comparator = comparator
self.integer_value = integer_value
self.required_to_preview = required_to_preview
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
"Comparator": self.comparator,
}
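# the 'Exists' comparator takes no value; otherwise include IntegerValue when one was given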
if self.comparator != 'Exists' and self.integer_value is not None:
params['IntegerValue'] = self.integer_value
if self.required_to_preview:
params['RequiredToPreview'] = "true"
return params
class PercentAssignmentsSubmittedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsAbandonedRequirement(Requirement):
"""
The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsReturnedRequirement(Requirement):
"""
The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsApprovedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsRejectedRequirement(Requirement):
"""
The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class NumberHitsApprovedRequirement(Requirement):
"""
Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class LocaleRequirement(Requirement):
"""
A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account.
"""
def __init__(self, comparator, locale, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview)
self.locale = locale
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
"Comparator": self.comparator,
'LocaleValue.Country': self.locale,
}
if self.required_to_preview:
params['RequiredToPreview'] = "true"
return params
class AdultRequirement(Requirement):
"""
Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default).
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
# Determine whether an integer is a palindrome. Do this without extra space.
class Solution:
# @return a boolean
def isPalindrome1(self, x):
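# negative numbers and nonzero multiples of 10 can never be palindromes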
if x < 0 or x % 10 == 0 and x:
return False
xhalf = 0
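# peel digits off x onto xhalf until xhalf holds the reversed second half of the digits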
while x > xhalf:
xhalf = xhalf * 10 + x % 10
x /= 10
return x == xhalf or x == xhalf / 10
def isPalindrome(self, x):
if x < 0:
return False
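# alternative approach: reverse every digit and compare with the original (still O(1) extra space)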
size, xreverse = x, 0
while size:
xreverse = xreverse * 10 + size % 10
size = (size - (size % 10)) / 10
return xreverse == x
if __name__ == '__main__':
s = Solution()
print s.isPalindrome1(0)
'''
Created on Aug 27, 2013
@author: Devon
Define gui events
'''
from pyHopeEngine import BaseEvent
class Event_ButtonPressed(BaseEvent):
'''Sent when a button is pressed'''
eventType = "ButtonPressed"
def __init__(self, value):
'''Contains a value identifying the button'''
self.value = value
class Event_ScreenResize(BaseEvent):
'''Sent when a screen resize is requested'''
eventType = "ScreenResize"
def __init__(self, width, height):
self.width = width
self.height = height |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
import time
import datetime
conn = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="drupal")
cann = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="content_delivery_weather")
cursor = conn.cursor()
cursar = cann.cursor()
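# step 1: mirror Drupal user emails into the probe-editing table; step 2: backfill each probe's firsttime from its oldest data table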
cursor.execute("""SELECT uid, mail FROM users""")
rows = cursor.fetchall()
for row in rows:
if row[0] != 0:
print('{0} : {1} '.format(row[0], row[1]))
#print('UPDATE new_v4_users_probes_edit SET email = {0} WHERE uid = {1}'.format(row[1], row[0]))
cursar.execute("""UPDATE new_v4_users_probes_edit SET email = %s WHERE userid = %s""", (row[1], row[0]))
cursar.execute("""SELECT probename, probeid FROM new_v4_sonde""")
rows = cursar.fetchall()
for row in rows:
cursar.execute("""SHOW TABLES LIKE %s""",("%" + row[0] + "%",))
rowsbis = cursar.fetchall()
for rowbis in rowsbis:
result = rowbis[0].split("_")
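# table names seem to end in ..._<year>_<month>, with the month zero-based, hence the +1 below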
month = 1 + int(result[4])
s = "01/" + str(month) + "/" + result[3]
timestamp = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
print('{0} : {1} year: {2} month: {3} timestamp: {4}'.format(row[0], rowbis[0], result[3], result[4], round(timestamp,0)))
cursar.execute("""SELECT firsttime FROM new_v4_sonde WHERE probeid = %s""",(row[1],))
rowsbisbis = cursar.fetchall()
for rowbisbis in rowsbisbis:
if rowbisbis[0] is None:
cursar.execute("""UPDATE new_v4_sonde SET firsttime = %s WHERE probeid = %s""",(timestamp,row[1]))
print('firsttime: {0}'.format(rowbisbis[0],))
conn.close()
cann.close()
# I made some modifications to termcolor so you can pass HEX colors to
# the colored function. It then chooses the nearest xterm 256 color to
# that HEX color. This requires some color functions that I have added
# in my python path.
#
# 2015/02/16
#
#
# coding: utf-8
# Copyright (c) 2008-2011 Volvox Development Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Konstantin Lepa <[email protected]>
"""ANSII Color formatting for output in terminal."""
from __future__ import print_function
import os
import re
from hexrgb_conversion import rgb
from x256 import from_rgb
__ALL__ = ["colored", "cprint"]
VERSION = (1, 1, 0)
ATTRIBUTES = dict(
list(
zip(
["bold", "dark", "", "underline", "blink", "", "reverse", "concealed"],
list(range(1, 9)),
)
)
)
del ATTRIBUTES[""]
HIGHLIGHTS = dict(
list(
zip(
[
"on_grey",
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
"on_white",
],
list(range(40, 48)),
)
)
)
COLORS = dict(
list(
zip(
["grey", "red", "green", "yellow", "blue", "magenta", "cyan", "white"],
list(range(30, 38)),
)
)
)
RESET = "\033[0m"
def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
I made some modification so you can pass HEX colors too
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
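colored('Hello, World!', '#EE2E2F')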
"""
if os.getenv("ANSI_COLORS_DISABLED") is None:
fmt_str = "\033[%dm%s"
if color is not None:
if "#" in color:
color = re.sub("[#]", "" | , color)
RGB = rgb(color)
x256_color_index = from_rgb(RGB[0], RGB[1], RGB[2])
text = "\033[38;5;%dm | %s" % (x256_color_index, text)
else:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
if "#" in on_color:
on_color = re.sub("[#]", "", on_color)
RGB = rgb(on_color)
x256_color_index = from_rgb(RGB[0], RGB[1], RGB[2])
text = "\033[48;5;%dm%s" % (x256_color_index, text)
else:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print function.
"""
print((colored(text, color, on_color, attrs)), **kwargs)
if __name__ == "__main__":
print("Current terminal type: %s" % os.getenv("TERM"))
print("Test basic colors:")
cprint("Grey color", "grey")
cprint("Red color", "red")
cprint("Green color", "green")
cprint("Yellow color", "yellow")
cprint("Blue color", "blue")
cprint("Magenta color", "magenta")
cprint("Cyan color", "cyan")
cprint("White color", "white")
print(("-" * 78))
print("Test highlights:")
cprint("On grey color", on_color="on_grey")
cprint("On red color", on_color="on_red")
cprint("On green color", on_color="on_green")
cprint("On yellow color", on_color="on_yellow")
cprint("On blue color", on_color="on_blue")
cprint("On magenta color", on_color="on_magenta")
cprint("On cyan color", on_color="on_cyan")
cprint("On white color", color="grey", on_color="on_white")
print("-" * 78)
print("Test attributes:")
cprint("Bold grey color", "grey", attrs=["bold"])
cprint("Dark red color", "red", attrs=["dark"])
cprint("Underline green color", "green", attrs=["underline"])
cprint("Blink yellow color", "yellow", attrs=["blink"])
cprint("Reversed blue color", "blue", attrs=["reverse"])
cprint("Concealed Magenta color", "magenta", attrs=["concealed"])
cprint(
"Bold underline reverse cyan color",
"cyan",
attrs=["bold", "underline", "reverse"],
)
cprint(
"Dark blink concealed white color",
"white",
attrs=["dark", "blink", "concealed"],
)
print(("-" * 78))
print("Test mixing:")
cprint("Underline red on grey color", "red", "on_grey", ["underline"])
cprint("Reversed green on red color", "green", "on_red", ["reverse"])
print("Using HEX colors:")
cprint("Use HEX color EE2E2F", "#EE2E2F")
#! /usr/bin/python
'''
Suppose a sorted array is rotated at some pivot unknown to you beforehand. (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element.
'''
class Solution:
# @param num, a list of integer
# @return an integer
# You may assume no duplicate exists in the array.
def findMinNoDuplicate(self, num):
INT_MIN_VALUE = -(2**32)
size = len(num)
if size == 0:
return INT_MIN_VALUE
elif size == 1:
return num[0]
low_index = 0
high_index = size - 1
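# binary search on the rotation: compare the middle element with the right end to pick the half holding the minimum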
while (low_index < high_index - 1):
mid_index = low_index + (high_index - low_index) / 2
if (num[mid_index] > num[high_index]):
low_index = mid_index
else:
high_index = mid_index
return min(num[low_index], num[high_index])
# @param num, a list of integer
# @return an integer
# You may assume duplicate exists in the array.
def findMinDuplicate(self, num):
INT_MIN_VALUE = -(2**32)
size = len(num)
if size == 0:
return INT_MIN_VALUE
elif size == 1:
return num[0]
low_index = 0
high_index = size - 1
while (low_index < high_index - 1):
mid_index = low_index + (high_index - low_index) / 2
if (num[mid_index] > num[high_index]):
low_index = mid_index
elif (num[mid_index] < num[high_index]):
high_index = mid_index
else:
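# num[mid] == num[high]: duplicates hide which side the pivot is on, so shrink by one (worst case O(n))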
high_index -= 1
return min(num[low_index], num[high_index])
if __name__ == '__main__':
solution = Solution()
print solution.findMinDuplicate([3,3,1,2,2])
import pytest
import re
import capybara
class TestHasSelector:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_is_true_if_the_given_selector_is_on_the_page(self, session):
assert session.has_selector("xpath", "//p")
assert session.has_selector("css", "p a#foo")
assert session.has_selector("//p[contains(.,'est')]")
def test_is_false_if_the_given_selector_is_not_on_the_page(self, session):
assert not session.has_selector("xpath", "//abbr")
assert not session.has_selector("css", "p a#doesnotexist")
assert not session.has_selector("//p[contains(.,'thisstringisnotonpage')]")
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
assert not session.has_selector("p a#doesnotexist")
assert session.has_selector("p a#foo")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
assert session.has_selector(".//a[@id='foo']")
assert not session.has_selector(".//a[@id='red']")
def test_is_true_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
assert session.has_selector("//p", count=3)
assert session.has_selector("//p//a[@id='foo']", count=1)
assert session.has_selector("//p[contains(.,'est')]", count=1)
def test_is_false_if_the_content_is_not_on_the_page_the_given_number_of_times(self, session):
assert not session.has_selector("//p", count=6)
assert not session.has_selector("//p//a[@id='foo']", count=2)
assert not session.has_selector("//p[contains(.,'est')]", count=5)
def test_is_false_if_the_content_is_not_on_the_page_at_all(self, session):
assert not session.has_selector("//abbr", count=2)
assert not session.has_selector("//p//a[@id='doesnotexist']", count=1)
def test_discards_all_matches_where_the_given_string_is_not_contained(self, session):
assert session.has_selector("//p//a", text="Redirect", count=1)
assert not session.has_selector("//p", text="Doesnotexist")
def test_respects_visibility_setting(self, session):
assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False)
assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True)
capybara.ignore_hidden_elements = False
assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False)
capybara.visible_text_only = True
assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True)
def test_discards_all_matches_where_the_given_regex_is_not_matched(self, session):
assert session.has_selector("//p//a", text=re.compile("re[dab]i", re.IGNORECASE), count=1)
assert not session.has_selector("//p//a", text=re.compile("Red$"))
def test_only_matches_elements_that_match_exact_text_exactly(self, session):
assert session.has_selector("id", "h2one", exact_text="Header Class Test One")
assert not session.has_selector("id", "h2one", exact_text="Header Class Test")
def test_only_matches_elements_that_match_exactly_when_exact_text_true(self, session):
assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=True)
assert not session.has_selector("id", "h2one", text="Header Class Test", exact_text=True)
def test_matches_substrings_when_exact_text_false(self, session):
assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=False)
assert session.has_selector("id", "h2one", text="Header Class Test", exact_text=False)
class TestHasNoSelector:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_is_false_if_the_given_selector_is_on_the_page(self, session):
assert not session.has_no_selector("xpath", "//p")
assert not session.has_no_selector("css", "p a#foo")
assert not session.has_no_selector("//p[contains(.,'est')]")
def test_is_true_if_the_given_selector_is_not_on_the_page(self, session):
assert session.has_no_selector("xpath", "//abbr")
assert session.has_no_selector("css", "p a#doesnotexist")
assert session.has_no_selector("//p[contains(.,'thisstringisnotonpage')]")
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
assert session.has_no_selector("p a#doesnotexist")
assert not session.has_no_selector("p a#foo")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
assert not session.has_no_selector(".//a[@id='foo']")
assert session.has_no_selector("../a[@id='red']")
def test_is_false_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
assert not session.has_no_selector("//p", count=3)
assert not session.has_no_selector("//p//a[@id='foo']", count=1)
assert not session.has_no_selector("//p[contains(.,'est')]", count=1)
def test_is_true_if_the_content_is_on_the_page_the_wrong_number_of_times(self, session):
assert session.has_no_selector("//p", count=6)
assert session.has_no_selector("//p//a[@id='foo']", count=2)
assert session.has_no_selector("//p[contains(.,'est')]", count=5)
def test_is_true_if_the_content_is_not_on_the_page_at_all(self, session):
assert session.has_no_selector("//abbr", count=2)
assert session.has_no_selector("//p//a[@id='doesnotexist']", count=1)
def test_discards_all_matches_where_the_given_string_is_contained(self, session):
assert not session.has_no_selector("//p//a", text="Redirect", count=1)
assert session.has_no_selector("//p", text="Doesnotexist")
def test_discards_all_matches_where_the_given_regex_is_matched(self, session):
assert not session.has_no_selector("//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1)
assert session.has_no_selector("//p//a", text=re.compile(r"Red$"))
def test_only_matches_elements_that_do_not_match_exact_text_exactly(self, session):
assert not session.has_no_selector("id", "h2one", exact_text="Header Class Test One")
assert session.has_no_selector("id", "h2one", exact_text="Header Class Test")
def test_only_matches_elements_that_do_not_match_exactly_when_exact_text_true(self, session):
assert not session.has_no_selector("id", "h2one", text="Header Class Test One",
exact_text=True)
assert session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=True)
def test_does_not_match_substrings_when_exact_text_false(self, session):
assert not session.has_no_selector("id", "h2one", text="Header Class Test One",
exact_text=False)
assert not session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=False)
#
# Copyright (C) 2017 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This is based on sherpa/sim/tests_sim_unit.py.
"""
from sherpa.astro import sim
# This is part of #397
#
def test_list_samplers():
"""Ensure list_samplers returns a list."""
mcmc = sim.MCMC()
samplers = mcmc.list_samplers()
assert isinstance(samplers, list)
assert len(samplers) > 0
def test_list_samplers_contents():
"""Are the expected values included"""
# Test that the expected values exist in this list,
# but do not enforce these are the only values. This is
# a slightly-different return list to the non-astro version.
#
samplers = sim.MCMC().list_samplers()
for expected in ['mh', 'metropolismh', 'pragbayes', 'fullbayes']:
assert expected in samplers