#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_user
short_description: Adds or removes users to RabbitMQ
description:
- Add or remove users to RabbitMQ and assign permissions
version_added: "1.1"
author: '"Chris Hoffman (@chrishoffman)"'
options:
user:
description:
- Name of user to add
required: true
default: null
aliases: [username, name]
password:
description:
- Password of user to add.
- To change the password of an existing user, you must also specify
C(force=yes).
required: false
default: null
tags:
description:
      - User tags specified as a comma-delimited string.
required: false
default: null
permissions:
description:
      - a list of dicts, each dict containing vhost, configure_priv, write_priv, and read_priv,
        and representing a permission rule for that vhost.
      - Use this option when you want to manage all of the user's permissions.
      - Use the vhost, configure_priv, write_priv, and read_priv options instead
        if you only care about permissions for specific vhosts.
required: false
default: []
vhost:
description:
      - vhost to apply access privileges to.
- This option will be ignored when permissions option is used.
required: false
default: /
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
write_priv:
description:
      - Regular expression to restrict write actions on a resource
        for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
read_priv:
description:
      - Regular expression to restrict read actions on a resource
        for the specified vhost.
- By default all actions are restricted.
- This option will be ignored when permissions option is used.
required: false
default: ^$
force:
description:
- Deletes and recreates the user.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if user is to be added or removed
required: false
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Add user to server and assign full access control on / vhost.
# The user might have permission rules for other vhosts; those are left untouched.
- rabbitmq_user:
user: joe
password: changeme
vhost: /
configure_priv: .*
read_priv: .*
write_priv: .*
state: present
# Add user to server and assign full access control on / vhost.
# Any permission rules the user has for other vhosts will be removed.
- rabbitmq_user:
user: joe
password: changeme
permissions:
- vhost: /
configure_priv: .*
read_priv: .*
write_priv: .*
state: present
'''
from ansible.module_utils.basic import AnsibleModule
class RabbitMqUser(object):
def __init__(self, module, username, password, tags, permissions,
node, bulk_permissions=False):
self.module = module
self.username = username
self.password = password
self.node = node
if not tags:
self.tags = list()
else:
self.tags = tags.split(',')
self.permissions = permissions
self.bulk_permissions = bulk_permissions
self._tags = None
self._permissions = []
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
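        # Run rabbitmqctl with the given arguments. In check mode only commands
        # explicitly marked run_in_check_mode (read-only queries) are executed;
        # state-changing calls are skipped and return an empty list.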
if not self.module.check_mode or run_in_check_mode:
cmd = [self._rabbitmqctl, '-q']
if self.node is not None:
cmd.extend(['-n', self.node])
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get(self):
users = self._exec(['list_users'], True)
for user_tag in users:
if '\t' not in user_tag:
continue
user, tags = user_tag.split('\t')
if user == self.username:
for c in ['[', ']', ' ']:
tags = tags.replace(c, '')
if tags != '':
self._tags = tags.split(',')
else:
self._tags = list()
self._permissions = self._get_permissions()
return True
return False
def _get_permissions(self):
perms_out = self._exec(['list_user_permissions', self.username], True)
perms_list = list()
for perm in perms_out:
vhost, configure_priv, write_priv, read_priv = perm.split('\t')
if not self.bulk_permissions:
if vhost == self.permissions[0]['vhost']:
perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
write_priv=write_priv, read_priv=read_priv))
break
else:
perms_list.append(dict(vhost=vhost, configure_priv=configure_priv,
write_priv=write_priv, read_priv=read_priv))
return perms_list
def add(self):
if self.password is not None:
self._exec(['add_user', self.username, self.password])
else:
self._exec(['add_user', self.username, ''])
self._exec(['clear_password', self.username])
def delete(self):
self._exec(['delete_user', self.username])
def set_tags(self):
self._exec(['set_user_tags', self.username] + self.tags)
def set_permissions(self):
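        # Clear permission rules for vhosts that are no longer requested, then
        # apply any requested rules that are not already in place.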
for permission in self._permissions:
if permission not in self.permissions:
cmd = ['clear_permissions', '-p']
cmd.append(permission['vhost'])
cmd.append(self.username)
self._exec(cmd)
for permission in self.permissions:
if permission not in self._permissions:
cmd = ['set_permissions', '-p']
cmd.append(permission['vhost'])
cmd.append(self.username)
cmd.append(permission['configure_priv'])
cmd.append(permission['write_priv'])
cmd.append(permission['read_priv'])
self._exec(cmd)
def has_tags_modifications(self):
return set(self.tags) != set(self._tags)
def has_permissions_modifications(self):
return sorted(self._permissions) != sorted(self.permissions)
def main():
arg_spec = dict(
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None, no_log=True),
tags=dict(default=None),
permissions=dict(default=list(), type='list'),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
permissions = module.params['permissions']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
read_priv = module.params['read_priv']
force = module.params['force']
state = module.params['state']
node = module.params['node']
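    # When no explicit permissions list is given, build a single permission rule
    # from the flat vhost/configure_priv/write_priv/read_priv options and manage
    # only that vhost; otherwise manage the user's permissions across all vhosts.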
bulk_permissions = True
if not permissions:
perm = {
'vhost': vhost,
'configure_priv': configure_priv,
'write_priv': write_priv,
'read_priv': read_priv
}
permissions.append(perm)
bulk_permissions = False
rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
node, bulk_permissions=bulk_permissions)
result = dict(changed=False, user=username, state=state)
if rabbitmq_user.get():
if state == 'absent':
rabbitmq_user.delete()
result['changed'] = True
else:
if force:
rabbitmq_user.delete()
rabbitmq_user.add()
rabbitmq_user.get()
result['changed'] = True
if rabbitmq_user.has_tags_modifications():
rabbitmq_user.set_tags()
result['changed'] = True
if rabbitmq_user.has_permissions_modifications():
rabbitmq_user.set_permissions()
result['changed'] = True
elif state == 'present':
rabbitmq_user.add()
rabbitmq_user.set_tags()
rabbitmq_user.set_permissions()
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
import pygame, sys, math, random
from pygame.locals import *
FPS = 60
MAX_X = 800
MAX_Y = 600
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
GRAY = (128, 128, 128)
DARKGRAY = ( 40, 40, 40)
RED = (255, 40, 40)
BLUE = (40, 40, 255)
GREEN = (40, 255, 40)
BGCOLOR = BLACK
#---- class: Paddle ------------------------------------------------------
class Paddle(pygame.sprite.Sprite):
def __init__(self, pos, color = BLUE):
pygame.sprite.Sprite.__init__(self)
self.load_sprite(color)
self.score = 0
self.rect.center = pos
self.cmd = [0, 0] # [up, down]
self.speed = 8
def load_sprite(self, color):
self.image = pygame.Surface((16, 80))
self.image.fill(BGCOLOR)
self.image.set_colorkey(BGCOLOR)
pygame.draw.rect(self.image, color, (0, 8, 16, 64))
pygame.draw.circle(self.image, color, (8, 8), 8)
pygame.draw.circle(self.image, color, (8, 72), 8)
self.rect = self.image.get_rect()
def set_cmd(self, index):
"""Set various commands based on user input. 0 = up, 1 = down"""
self.cmd[index] = 1
def unset_cmd(self, index):
"""Clear a command. 0 = up, 1 = down"""
self.cmd[index] = 0
def clear_cmd(self):
"""Clear all commands currently set."""
for i in range(0, len(self.cmd)):
self.cmd[i] = 0
def update(self):
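        # cmd[0] is "move up" and cmd[1] is "move down"; their difference gives
        # the direction of travel, scaled by speed and clamped to the screen.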
dy = (self.cmd[1] - self.cmd[0]) * self.speed
self.rect.move_ip(0, dy)
if self.rect.top < 0:
self.rect.top = 0
if self.rect.bottom > MAX_Y:
self.rect.bottom = MAX_Y
    def get_score_img(self, font):
        """Render this paddle's score with the given font and return the surface."""
        return font.render(str(self.score), True, GRAY, BGCOLOR)
#---- class: Ball --------------------------------------------------------
class Ball(pygame.sprite.Sprite):
def __init__(self, vel = None, pos = None):
pygame.sprite.Sprite.__init__(self)
        if pos is None:
pos = [MAX_X//2, MAX_Y//2]
self.pos = pos
        if vel is None:
vel = [3, 4]
self.vel = vel
self.load_sprite()
def load_sprite(self):
self.image = pygame.Surface((16, 16))
self.image.fill(BGCOLOR)
self.image.set_colorkey(BGCOLOR)
pygame.draw.circle(self.image, RED, (8, 8), 8)
self.rect = self.image.get_rect()
def update(self):
self.pos[0] += self.vel[0]
self.pos[1] += self.vel[1]
self.rect.center = self.pos
#---- class: Game --------------------------------------------------------
class Game():
    def __init__(self, display):
        self.display = display
#---- module helpers ------------------------------------------------------
def terminate():
    """Shut down pygame and exit; called from the main loop on QUIT/ESC."""
    pygame.quit()
    sys.exit()
def advance_frame(clock):
    """Flip the display and wait out the remainder of the frame."""
    pygame.display.update()
    clock.tick(FPS)
def handle_events():
    pass
#### main ################################################################
def main():
pygame.init()
CLOCK = pygame.time.Clock()
DISPLAY = pygame.display.set_mode((MAX_X, MAX_Y))
FONT1 = pygame.font.SysFont('courier', 45)
pygame.display.set_caption('pyGame Template')
random.seed()
player1 = Paddle((20, MAX_Y//2), BLUE)
player2 = Paddle((MAX_X-20, MAX_Y//2), GREEN)
players = pygame.sprite.RenderPlain(player1, player2)
balls = pygame.sprite.RenderPlain( Ball() )
arena = pygame.Rect(0, 0, MAX_X, MAX_Y)
allsprites = pygame.sprite.Group(players, balls)
while True:
#event handling
for event in pygame.event.get():
if event.type == QUIT:
terminate()
#TODO should this check be made within Paddle?
elif event.type == KEYDOWN:
if event.key == K_UP: player2.set_cmd(0)
elif event.key == K_DOWN: player2.set_cmd(1)
elif event.key == K_w: player1.set_cmd(0)
elif event.key == K_s: player1.set_cmd(1)
elif event.type == KEYUP:
if event.key == K_ESCAPE:
terminate()
elif event.key == K_UP: player2.unset_cmd(0)
elif event.key == K_DOWN: player2.unset_cmd(1)
elif event.key == K_w: player1.unset_cmd(0)
elif event.key == K_s: player1.unset_cmd(1)
# update game state
players.update()
balls.update()
# detect ball colliding with walls or endzones
for b in balls:
if b.rect.top < 0 or b.rect.bottom > MAX_Y:
b.vel[1] = -b.vel[1]
if b.rect.right < 0:
player2.score += 1
b.kill()
balls.add(Ball([4, 5]))
if b.rect.left > MAX_X:
player1.score += 1
b.kill()
balls.add(Ball([-4, 5]))
# detect collision with paddles
for b in pygame.sprite.spritecollide(player1, balls, 0):
b.vel[0] = -b.vel[0]
for b in pygame.sprite.spritecollide(player2, balls, 0):
b.vel[0] = -b.vel[0]
# draw frame
DISPLAY.fill(BGCOLOR)
players.draw(DISPLAY)
balls.draw(DISPLAY)
p1_img = FONT1.render("%d"%(player1.score), True, GRAY, BGCOLOR)
p1_img_pos = ( MAX_X//4-p1_img.get_width(), 10 )
DISPLAY.blit(p1_img, p1_img_pos )
p2_img = FONT1.render("%d"%(player2.score), True, GRAY, BGCOLOR)
p2_img_pos = ( MAX_X*3//4-p2_img.get_width(), 10 )
DISPLAY.blit(p2_img, p2_img_pos )
pygame.draw.line(DISPLAY, GRAY, (MAX_X//2, 0), (MAX_X//2, MAX_Y))
pygame.display.update()
CLOCK.tick(FPS)
terminate()
if __name__ == '__main__':
main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for computing default gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
def get_zeros_dtype(t):
"""Return the dtype for the default gradient for a Tensor."""
if t.dtype == dtypes.resource:
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
if (handle_data is None or not handle_data.is_set or
len(handle_data.shape_and_type) != 1):
raise ValueError("Internal error: Tried to take gradients (or similar) "
"of a variable without handle data:\n%s" % str(t))
return handle_data.shape_and_type[0].dtype
return t.dtype
def shape_and_dtype(t):
"""Return the shape and dtype for the default gradient for a Tensor."""
if t.dtype == dtypes.resource:
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
if (handle_data is None or not handle_data.is_set or
len(handle_data.shape_and_type) != 1):
raise ValueError("Internal error: Tried to take gradients (or similar) "
"of a variable without handle data:\n%s" % str(t))
shape_and_type = handle_data.shape_and_type[0]
return (tensor_shape.TensorShape(shape_and_type.shape),
dtypes.as_dtype(shape_and_type.dtype))
return t.shape, t.dtype
def zeros_like(t):
"""Like array_ops.zeros_like, but respects resource handles."""
if t.dtype == dtypes.resource:
return array_ops.zeros(*shape_and_dtype(t))
else:
return array_ops.zeros_like(t)
def ones_like(t):
"""Like array_ops.ones_like, but respects resource handles."""
if t.dtype == dtypes.resource:
return array_ops.ones(*shape_and_dtype(t))
else:
return array_ops.ones_like(t)
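# Usage sketch (assumed caller, not part of this module): gradient code can use
# these helpers to build a default gradient matching the variable behind a
# resource handle rather than the handle tensor itself, e.g.
#     grad = zeros_like(resource_handle_tensor)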
|
"""
Creates a confusion matrix.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
label_file = "./data/outcome.txt"
def confusion(probs, y_test):
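    """Print a 2x2 confusion matrix and return (TP, FP, FN, TN).

    probs holds predicted probabilities for the positive class and y_test the
    matching 0/1 ground-truth labels; class names are read from label_file,
    one comma-separated record per line with the name in the second field.
    """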
target_names = []
with open(label_file, "rb") as vfile:
for line in vfile.readlines():
line = line.strip()
if not line: continue
target_names.append(line.split(',')[1])
n_hit = 0 # Hit or True Positive (TP)
n_correct_rej = 0 # Correct rejection or True Negative (TN)
n_miss = 0 # Miss or False Negative (FN)
n_false_alarm = 0 # False alarm, or False Positive (FP)
for i in range(len(probs)):
p = probs[i]
        pred = 1 if p > 0.5 else 0
if pred == 1:
if y_test[i] == 1:
n_hit = n_hit + 1
else:
n_false_alarm = n_false_alarm + 1
else:
if y_test[i] == 1:
n_miss = n_miss + 1
else:
n_correct_rej = n_correct_rej + 1
print "Confusion matrix"
print "{:25s} {:20s} {:20s}".format("", "Output " + target_names[1], "Output " + target_names[0])
print "{:25s}{:2.0f}{:19s}{:2.0f}".format("Predicted " + target_names[1], n_hit,"", n_false_alarm)
print "{:25s}{:2.0f}{:19s}{:2.0f}".format("Predicted " + target_names[0], n_miss,"", n_correct_rej)
return (n_hit, n_false_alarm, n_miss, n_correct_rej)
|
# -*- Mode: Python; test-case-name: flumotion.test.test_defer -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import random
from twisted.internet import defer, reactor
from twisted.python import reflect
# FIXME: this is for HandledException - maybe it should move here instead ?
from flumotion.common import errors
__version__ = "$Rev$"
# See flumotion.test.test_defer for examples
def defer_generator(proc):
def wrapper(*args, **kwargs):
gen = proc(*args, **kwargs)
result = defer.Deferred()
# To support having the errback of last resort, we need to have
# an errback which runs after all the other errbacks, *at the
        # point at which the deferred is fired*. So users of this code
        # can attach their errbacks at any time between the moment the
        # deferred is created and the moment it is fired.
#
# Unfortunately we only control the time that the deferred is
# created. So we attach a first errback that then adds an
# errback to the end of the list. Unfortunately we can't add to
# the list while the deferred is firing. In a decision between
# having decent error reporting and being nice to a small part
# of twisted I chose the former. This code takes a reference to
# the callback list, so that we can add an errback to the list
# while the deferred is being fired. It temporarily sets the
# state of the deferred to not having been fired, so that adding
# the errbacks doesn't automatically call the newly added
# methods.
result.__callbacks = result.callbacks
def with_saved_callbacks(proc, *_args, **_kwargs):
saved_callbacks, saved_called = result.callbacks, result.called
result.callbacks, result.called = result.__callbacks, False
proc(*_args, **_kwargs)
result.callbacks, result.called = saved_callbacks, saved_called
# Add errback-of-last-resort
def default_errback(failure, d):
# an already handled exception just gets propagated up without
# doing a traceback
if failure.check(errors.HandledException):
return failure
def print_traceback(f):
import traceback
print 'flumotion.twisted.defer: ' + \
'Unhandled error calling', proc.__name__, ':', f.type
traceback.print_exc()
with_saved_callbacks(lambda: d.addErrback(print_traceback))
raise
result.addErrback(default_errback, result)
def generator_next():
try:
x = gen.next()
if isinstance(x, defer.Deferred):
x.addCallback(callback, x).addErrback(errback, x)
else:
result.callback(x)
except StopIteration:
result.callback(None)
except Exception, e:
result.errback(e)
def errback(failure, d):
def raise_error():
# failure.parents[-1] will be the exception class for local
# failures and the string name of the exception class
# for remote failures (which might not exist in our
# namespace)
#
# failure.value will be the tuple of arguments to the
# exception in the local case, or a string
# representation of that in the remote case (see
# pb.CopyableFailure.getStateToCopy()).
#
# we can only reproduce a remote exception if the
# exception class is in our namespace, and it only takes
# one string argument. if either condition is not true,
# we wrap the strings in a default Exception.
k, v = failure.parents[-1], failure.value
try:
if isinstance(k, str):
k = reflect.namedClass(k)
if isinstance(v, tuple):
e = k(*v)
else:
e = k(v)
except Exception:
e = Exception('%s: %r' % (failure.type, v))
raise e
d.value = raise_error
generator_next()
def callback(result, d):
d.value = lambda: result
generator_next()
generator_next()
return result
return wrapper
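# A minimal usage sketch (names are illustrative, not part of this module):
# a function wrapped with defer_generator yields deferreds and reads each
# result back via d.value() once the deferred has fired; yielding a
# non-Deferred value finishes the wrapper deferred with that value.
#
#     @defer_generator
#     def fetch_twice(getter):
#         d = getter()          # getter() returns a Deferred
#         yield d
#         first = d.value()     # raises if the deferred failed
#         d = getter()
#         yield d
#         second = d.value()
#         yield (first, second)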
def defer_generator_method(proc):
return lambda self, *args, **kwargs: \
defer_generator(proc)(self, *args, **kwargs)
def defer_call_later(deferred):
"""
    Return a deferred which will fire from a callLater after the given deferred fires
"""
def fire(result, d):
reactor.callLater(0, d.callback, result)
res = defer.Deferred()
deferred.addCallback(fire, res)
return res
class Resolution:
"""
I am a helper class to make sure that the deferred is fired only once
with either a result or exception.
@ivar d: the deferred that gets fired as part of the resolution
@type d: L{twisted.internet.defer.Deferred}
"""
def __init__(self):
self.d = defer.Deferred()
self.fired = False
def cleanup(self):
"""
Clean up any resources related to the resolution.
Subclasses can implement me.
"""
pass
def callback(self, result):
"""
Make the result succeed, triggering the callbacks with
the given result. If a result was already reached, do nothing.
"""
if not self.fired:
self.fired = True
self.cleanup()
self.d.callback(result)
def errback(self, exception):
"""
Make the result fail, triggering the errbacks with the given exception.
If a result was already reached, do nothing.
"""
if not self.fired:
self.fired = True
self.cleanup()
self.d.errback(exception)
class RetryingDeferred(object):
"""
Provides a mechanism to attempt to run some deferred operation until it
succeeds. On failure, the operation is tried again later, exponentially
backing off.
"""
maxDelay = 1800 # Default to 30 minutes
initialDelay = 5.0
# Arbitrarily take these constants from twisted's ReconnectingClientFactory
factor = 2.7182818284590451
jitter = 0.11962656492
delay = None
def __init__(self, deferredCreate, *args, **kwargs):
"""
Create a new RetryingDeferred. Will call
deferredCreate(*args, **kwargs) each time a new deferred is needed.
"""
self._create = deferredCreate
self._args = args
self._kwargs = kwargs
self._masterD = None
self._running = False
self._callId = None
def start(self):
"""
Start trying. Returns a deferred that will fire when this operation
eventually succeeds. That deferred will only errback if this
        RetryingDeferred is cancelled (it will then errback with the result of
        the next attempt if one is in progress, or with a CancelledError).
# TODO: yeah?
"""
self._masterD = defer.Deferred()
self._running = True
self.delay = None
self._retry()
return self._masterD
def cancel(self):
if self._callId:
self._callId.cancel()
self._masterD.errback(errors.CancelledError())
self._masterD = None
self._callId = None
self._running = False
def _retry(self):
self._callId = None
d = self._create(*self._args, **self._kwargs)
d.addCallbacks(self._success, self._failed)
def _success(self, val):
# TODO: what if we were cancelled and then get here?
self._masterD.callback(val)
self._masterD = None
def _failed(self, failure):
if self._running:
nextDelay = self._nextDelay()
self._callId = reactor.callLater(nextDelay, self._retry)
else:
self._masterD.errback(failure)
self._masterD = None
def _nextDelay(self):
if self.delay is None:
self.delay = self.initialDelay
else:
self.delay = self.delay * self.factor
if self.jitter:
self.delay = random.normalvariate(self.delay,
self.delay * self.jitter)
self.delay = min(self.delay, self.maxDelay)
return self.delay
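# Usage sketch (names are illustrative, not part of this module): retry a
# deferred-returning operation with exponential backoff until it succeeds.
#
#     retrier = RetryingDeferred(connectToServer, host, port)
#     d = retrier.start()
#     d.addCallback(onConnected)
#     # retrier.cancel() stops retrying and errbacks the returned deferred.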
|
"""praisetex.py - Program used to generate presentation slides and chords
sheets"""
import sys
import os
from collections import deque
import argparse
import core
import gui
# get praisetex folders's absolute path
praisetex_dir = os.path.dirname(os.path.abspath(__file__))
def runGUI():
app = gui.PraiseTexGUI(os.path.join(praisetex_dir, "songs"))
app.run()
def chords(filename):
if len(filename) > 0:
print("Creating chords from: {}".format(args.filename))
songList = [os.path.basename(f) for f in filename]
praisetex = core.PraiseTex()
praisetex.refreshSongList()
index = 0
for songtitle in songList:
praisetex.addSong(index, songtitle)
index += 1
error = praisetex.compileChords()
if error:
print("pdflatex has failed")
else:
print("Compiled chords.pdf")
def slides(filename):
if len(filename) > 0:
print("Creating slides from: {}".format(args.filename))
songList = [os.path.basename(f) for f in filename]
praisetex = core.PraiseTex()
praisetex.refreshSongList()
index = 0
for songtitle in songList:
praisetex.addSong(index, songtitle)
index += 1
error = praisetex.compileSlides()
if error:
print("pdflatex has failed")
else:
print("Compiled slides.pdf")
def getParser():
parser = argparse.ArgumentParser(description='praiseTex: program for creating guitar chordsheets and presentation slides.')
# options compiling multiple song files
parser.add_argument(action='store', dest='filename', nargs='*')
parser.add_argument('-c', '--chords', action='store_true', default=False,
help='create chord sheets from provided song files')
parser.add_argument('-s', '--slides', action='store_true', default=False,
help='create presentation slides from provided song files')
# options for altering song files
# parser.add_argument('--transpose', action='store', type=int, metavar='N',
# help='transpose song file by number of half steps')
return parser
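# Example invocations (song file names are illustrative):
#   python praisetex.py --chords songs/amazing_grace.txt songs/doxology.txt
#   python praisetex.py --slides songs/amazing_grace.txt
#   python praisetex.py                 # no arguments: launch the GUI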
if __name__ == '__main__':
# command line parsing and handling
parser = getParser()
args = parser.parse_args()
if args.chords or args.slides: # creating chords or slides
if args.chords:
chords(args.filename)
if args.slides:
slides(args.filename)
# elif args.transpose is not None: # transposing song
# transpose(args.filename, args.transpose)
else:
runGUI()
|
#!/usr/bin/env python
###############################################################################
# #
# NucmerParser.py #
# #
# Class for parsing Nucmer output #
# #
# Copyright (C) Michael Imelfort, Donovan Parks #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2014"
__credits__ = ["Michael Imelfort, Donovan Parks"]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Michael Imelfort"
__email__ = "[email protected]"
__status__ = "Dev"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class NucMerParser:
"""Wrapper class for parsing nucmer output"""
# constants to make the code more readable
_START_1 = 0
_END_1 = 1
_START_2 = 2
_END_2 = 3
_LEN_1 = 4
_LEN_2 = 5
_IDENTITY = 6
_ID_1 = 7
_ID_2 = 8
def __init__(self):
self.prepped = False
def readNuc(self, fp):
"""Read through a nucmer coords file
this is a generator function
"""
line = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not self.prepped:
# we still need to strip out the header
for l in fp: # search for the first record
if l[0] == '=': # next line is good
self.prepped = True
break
# file should be prepped now
for l in fp:
fields = l.split('|')
yield ([int(i) for i in fields[0].split()] +
[int(i) for i in fields[1].split()] +
[int(i) for i in fields[2].split()] +
[float(i) for i in fields[3].split()] +
fields[4].split())
break # done!
###############################################################################
###############################################################################
###############################################################################
###############################################################################
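# Usage sketch (file name is illustrative): iterate over the records of a
# nucmer .coords file, indexing each record with the class constants.
#
#     parser = NucMerParser()
#     with open('alignments.coords') as fh:
#         for rec in parser.readNuc(fh):
#             print(rec[NucMerParser._ID_1], rec[NucMerParser._IDENTITY])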
|
from hashlib import md5
import datetime
from flask_turboduck.auth import BaseUser
from peewee import *
from app import db
class User(db.Model, BaseUser):
username = CharField()
password = CharField()
email = CharField()
join_date = DateTimeField(default=datetime.datetime.now)
active = BooleanField(default=True)
admin = BooleanField(default=False)
def __unicode__(self):
return self.username
def following(self):
return User.select().join(
Relationship, on=Relationship.to_user
).where(Relationship.from_user==self).order_by(User.username)
def followers(self):
return User.select().join(
Relationship, on=Relationship.from_user
).where(Relationship.to_user==self).order_by(User.username)
def is_following(self, user):
return Relationship.select().where(
Relationship.from_user==self,
Relationship.to_user==user
).exists()
def gravatar_url(self, size=80):
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)
class Relationship(db.Model):
from_user = ForeignKeyField(User, related_name='relationships')
to_user = ForeignKeyField(User, related_name='related_to')
def __unicode__(self):
return 'Relationship from %s to %s' % (self.from_user, self.to_user)
class Message(db.Model):
user = ForeignKeyField(User)
content = TextField()
pub_date = DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return '%s: %s' % (self.user, self.content)
class Note(db.Model):
user = ForeignKeyField(User)
message = TextField()
status = IntegerField(choices=((1, 'live'), (2, 'deleted')), null=True)
created_date = DateTimeField(default=datetime.datetime.now)
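# Query sketch (user names are illustrative): the helpers on User return
# peewee queries that can be iterated or further filtered.
#
#     bob = User.get(User.username == 'bob')
#     for u in bob.following():
#         print(u.username)
#     bob.is_following(alice)   # -> True/False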
|
#!/usr/bin/env python
#
# Copyright 2008 Bodil Stokke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import Qt, QSize, QRect, QPoint, QTimer, SIGNAL, QUrl, QRectF
from PyQt4.QtGui import QWidget, QCursor, QPixmap, QPainter, QApplication, QFont, \
QFontMetrics, QTextOption, QPen
from PyQt4.QtWebKit import QWebView
class PreviewTooltip(QWidget):
def __init__(self, url):
QWidget.__init__(self, None, Qt.ToolTip)
self.url = url
self.font = QFont(QApplication.font(self))
self.font.setPointSize(8)
desktop = QApplication.desktop().availableGeometry(self)
cursor = QCursor.pos()
rect = QRect(cursor + QPoint(-10, 10), QSize(240,180 + QFontMetrics(self.font).height()))
if rect.left() < desktop.left():
rect.moveLeft(desktop.left())
if rect.right() > desktop.right():
rect.moveRight(cursor.x() - 10)
if rect.bottom() > desktop.bottom():
rect.moveBottom(cursor.y() - 10)
self.setGeometry(rect)
self.pixmap = None
self.progress = 0
self.title = unicode(self.url)
self.webView = QWebView()
self.webView.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
self.webView.page().mainFrame().setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
self.webView.resize(1024,768)
self.webView.load(QUrl(url))
self.timer = QTimer(self)
self.connect(self.timer, SIGNAL("timeout()"), self.refresh)
self.timer.start(3000)
self.connect(self.webView, SIGNAL("loadFinished(bool)"), self.refresh)
self.connect(self.webView, SIGNAL("loadProgress(int)"), self.updateProgress)
self.connect(self.webView, SIGNAL("urlChanged(const QUrl&)"), self.newUrl)
self.connect(self.webView, SIGNAL("titleChanged(const QString&)"), self.newTitle)
def updateProgress(self, progress):
self.progress = progress
self.update()
def newUrl(self, url):
self.title = unicode(url.toString())
self.update()
def newTitle(self, title):
self.title = unicode(title)
self.update()
def refresh(self):
view = QPixmap(self.webView.size())
self.webView.render(view)
self.pixmap = view.scaled(QSize(240,180), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
self.update()
def paintEvent(self, event):
QWidget.paintEvent(self, event)
p = QPainter(self)
p.setFont(self.font)
if self.pixmap:
p.drawPixmap(QPoint(0,0), self.pixmap)
r = QRect(self.rect().topLeft() + QPoint(0, 180), QSize(self.rect().size().width(), p.fontMetrics().height()))
p.fillRect(r, self.palette().midlight())
if self.progress:
pr = QRect(r)
pr.setWidth( (r.width() * self.progress) / 100 )
p.fillRect(pr, self.palette().dark())
p.setBrush(Qt.NoBrush)
p.setPen(QPen(self.palette().text(), 1))
p.drawText(QRectF(r), self.title, QTextOption(Qt.AlignHCenter))
p.setPen(QPen(self.palette().shadow(), 1))
p.drawRect(self.rect().adjusted(0,0,-1,-1))
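# Usage sketch (URL is illustrative): the tooltip positions itself near the
# cursor and periodically repaints a scaled-down render of the loading page.
#
#     tip = PreviewTooltip("http://www.example.com/")
#     tip.show()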
|
#!/usr/bin/python3
'''
*** Copyright (c) 2017-2020 by Extreme Networks, Inc.
*** All rights reserved.
'''
import argparse
import re
import sys
from SLX_BitMap import BitMap
from SLX_IntfTypeMap import IntfTypeMap
from SLX_TunnelTypeMap import TunnelTypeMap
from SLXRSpeedMap import SLXRSpeedMap
from SLXSSpeedMap import SLXSSpeedMap
from SLX_PortData import PortData
from SLX_PortMapping import PortMapping
from IfIndex import IfIndex
from SLX_IfIndex_Core import Slx_IfIndex_Core
from Slx9850_IfIndex import Slx9850_IfIndex
from Slx9740_IfIndex import Slx9740_IfIndex
from Slx9640_IfIndex import Slx9640_IfIndex
from Slx9540_IfIndex import Slx9540_IfIndex
from Slx9250_IfIndex import Slx9250_IfIndex
from Slx9240_IfIndex import Slx9240_IfIndex
from Slx9150_IfIndex import Slx9150_IfIndex
from Slx9140_IfIndex import Slx9140_IfIndex
from Slx9030_IfIndex import Slx9030_IfIndex
from Slx_IfIndex import Slx_IfIndex
def main():
'''
This script uses the device type, human readable interface, interface
speed, and other information to calculate the SNMP IFINDEX for that
interface, and returns it as a decimal (for use with snmp), hexadecimal, or
a binary value as a string.
'''
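    # Example invocation (script name and values are illustrative):
    #   ./slx_ifindex.py -d 9850 -l 36x100G -s 100g -i 'e 1/1' -o all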
sys.tracebacklimit = 0
parser = argparse.ArgumentParser(description='Script to generate ifIndex '
+ 'offline for SLX family of products.')
parser.add_argument('--interface', '-i',
help='The interface name in the format of '
+ '<type> <slot>/<port> or <type> <port>. '
+ 'Examples: e 1/1, e 2/1:1, tun 1, ve 20, po 1, m 1',
required=True)
parser.add_argument('--device', '-d', type=str,
help='SLX device in the format of the 4 digit '
+ 'product number. Examples: 9850, 9140',
required=True)
parser.add_argument('--linecard', '-l', default='', type=str,
choices=['72x10G', '36x100G', '48Y', '48XT', '40C',
'80C'],
help='LC type for 9850, or model for 9150 '
+ 'for physical ports', required=False)
parser.add_argument('--speed', '-s', default='10g', type=str,
help='physical interface speed: [1g | 10g | 25g | 40g '
+ '| 100g]', required=False)
parser.add_argument('--tunnel_type', '-t', default='', type=str,
choices=['vxlan', 'gre', 'nvgre', 'mpls'],
help='Tunnel types', required=False)
parser.add_argument('--output', '-o', default='dec',
choices=['dec', 'hex', 'bin', 'all'],
help='Output Display Mode: [bin | dec | hex | all]'
+ '(default: dec)', required=False)
args = parser.parse_args()
args_dict = vars(args)
# args1 = {'interface': 'e 1/1', 'linecard': '36x100G', 'device': '9850',
# 'disp_mode': 'decimal', 'speed': '100g', 'tunnel_type': None}
dummy = Slx_IfIndex(**args_dict)
if args.output in ['dec', 'all']:
print(dummy.get_if_index('decimal'))
if args.output in ['hex', 'all']:
print(dummy.get_if_index('hex'))
if args.output in ['bin', 'all']:
print(dummy.get_if_index('binary'))
return
if __name__ == '__main__':
main()
|
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
xpath_text,
)
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxEmbedIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .vimeo import VimeoIE
from .dailymotion import (
DailymotionIE,
DailymotionCloudIE,
)
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .vessel import VesselIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': 're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer' in the
# http requests
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/rg3/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embedded Ustream video
{
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417',
'info_dict': {
'id': '45734260',
'ext': 'flv',
'uploader': 'AU SPA: The NSA and Privacy',
'title': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman'
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
        # MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/rg3/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
'info_dict': {
'id': '1986',
'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
},
'playlist_mincount': 2,
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen | RTL Nieuws',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed protected with referrer
{
'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero',
'info_dict': {
'id': '1_g4fbemnq',
'ext': 'mp4',
'title': 'Violetta - Achter De Schermen - Ruggero',
'description': 'Achter de schermen met Ruggero',
'timestamp': 1435133761,
'upload_date': '20150624',
'uploader_id': 'echojecka',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'batchUser',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
},
# ClipYou (Eagle.Platform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': 're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
},
},
# SVT embed
{
'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
'info_dict': {
'id': '2900353',
'ext': 'flv',
'title': 'Här trycker Jagr till Giroux (under SVT-intervjun)',
'duration': 27,
'age_limit': 0,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': '701714499682',
'ext': 'mp4',
'title': 'PREVIEW: On Assignment: David Letterman',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Ooyala embed
{
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
'duration': 191.933,
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Dailymotion Cloud video
{
'url': 'http://replay.publicsenat.fr/vod/le-debat/florent-kolandjian,dominique-cena,axel-decourtye,laurence-abeille,bruno-parmentier/175910',
'md5': 'dcaf23ad0c67a256f4278bce6e0bae38',
'info_dict': {
'id': 'x2uy8t3',
'ext': 'mp4',
'title': 'Sauvons les abeilles ! - Le débat',
'description': 'md5:d9082128b1c5277987825d684939ca26',
'thumbnail': 're:^https?://.*\.jpe?g$',
'timestamp': 1434970506,
'upload_date': '20150622',
'uploader': 'Public Sénat',
'uploader_id': 'xa9gza',
}
},
# OnionStudios embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '2855',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'thumbnail': 're:^https?://.*\.jpe?g$',
'uploader': 'ClickHole',
'uploader_id': 'clickhole',
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
# This video can't be played in browsers if Flash is disabled and the UA is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': 'ace83b9ed19b21f68e1b50e844fdf95d',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
}
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': '[email protected]',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# Generic Vimeo embed that requires the original URL to be passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# }
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
continue
entries.append({
'_type': 'url',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
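# Illustrative sketch (hypothetical feed, not from the test data above): an RSS item such as
#   <item><title>Episode 1</title><link>http://example.com/ep1.mp3</link></item>
# is mapped by _extract_rss to an entry like
#   {'_type': 'url', 'url': 'http://example.com/ep1.mp3', 'title': 'Episode 1'}
# falling back to the first <enclosure url="..."> when the item has no usable <link>.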
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
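# Rough sketch of the Camtasia configuration consumed above; element names are inferred
# from the find() paths and the test case titles ('... - video1', '... - pip'), so the
# real files may differ:
#   .../playlist/array/fileset with children such as <video1> and <pip>, each carrying
#   a <uri> (media URL relative to the page) and a <duration> in seconds.
# Each child becomes one playlist entry titled '<DC.title> - <child tag>'.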
def _real_extract(self, url):
if url.startswith('//'):
return {
'_type': 'url',
'url': self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if '/' in url:
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
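# A few illustrative matches for the regex above (examples only, not exhaustive):
#   'video/mp4'                     -> type 'video', format_id 'mp4' (direct download)
#   'audio/mpeg'                    -> type 'audio', format_id 'mpeg', vcodec set to 'none'
#   'application/x-mpegurl'         -> format_id ends with 'mpegurl', extracted as HLS
#   'application/vnd.apple.mpegurl' -> likewise handled via _extract_m3u8_formats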
if m:
format_id = m.group('format_id')
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
# Some web servers may serve compressed content of rather big size (e.g. gzipped flac),
# making it impossible to download only a chunk of the file (yet we only need the
# first 512 bytes to test whether it's HTML or not). With youtube-dl's default
# Accept-Encoding this would always result in downloading the whole file, which is
# not desirable. Therefore, for the extraction pass we have to override
# Accept-Encoding to any in order to accept raw bytes and be able to download only
# a chunk. It may be better to solve this by checking Content-Type for
# application/octet-stream after the HEAD request finishes, but it's unclear whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc, video_id,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes the embedded video player is hidden behind percent encoding
# (e.g. https://github.com/rg3/youtube-dl/issues/2448)
# Unescaping the whole page allows us to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
# Helper method
def _playlist_from_matches(matches, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen('Brightcove video detected.')
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(webpage)
if bc_urls:
return _playlist_from_matches(bc_urls, ie='BrightcoveNew')
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return _playlist_from_matches(tp_urls, ie='ThePlatform')
# Look for Vessel embeds
vessel_urls = VesselIE._extract_urls(webpage)
if vessel_urls:
return _playlist_from_matches(vessel_urls, ie=VesselIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage)
if matches:
return _playlist_from_matches(matches, ie='RtlNl')
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return _playlist_from_matches(vimeo_urls, ie=VimeoIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(
r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: m[-1])
matches = DailymotionIE._extract_urls(webpage)
if matches:
return _playlist_from_matches(matches)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
embed_url = self._proto_relative_url(
unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'wistia:%s' % match.group('id'),
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(
r'''(?sx)
<script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
<div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
''', webpage)
if match:
return self.url_result(self._proto_relative_url(
'wistia:%s' % match.group('id')), 'Wistia')
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage)
if matches:
return {
'_type': 'playlist',
'entries': [{
'_type': 'url',
'ie_key': 'CondeNast',
'url': ma,
} for ma in matches],
'title': video_title,
'id': video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded NovaMov-based player
mobj = re.search(
r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1''', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Facebook player
facebook_url = FacebookIE._extract_url(webpage)
if facebook_url is not None:
return self.url_result(facebook_url, 'Facebook')
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Odnoklassniki')
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return _playlist_from_matches(
matches, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return _playlist_from_matches(matches, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed')
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return _playlist_from_matches(pornhub_urls, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return _playlist_from_matches(drtuber_urls, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return _playlist_from_matches(redtube_urls, ie=RedTubeIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ustream')
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudIE._extract_urls(webpage)
if soundcloud_urls:
return _playlist_from_matches(soundcloud_urls, getter=unescapeHTML, ie=SoundcloudIE.ie_key())
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
# Look for Eagle.Platform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(eagleplatform_url, EaglePlatformIE.ie_key())
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_url = JWPlatformIE._extract_url(webpage)
if jwplatform_url:
return self.url_result(jwplatform_url, 'JWPlatform')
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Limelight embeds
mobj = re.search(r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage)
if mobj:
lm = {
'Media': 'media',
'Channel': 'channel',
'ChannelList': 'channel_list',
}
return self.url_result('limelight:%s:%s' % (
lm[mobj.group(1)], mobj.group(2)), 'Limelight%s' % mobj.group(1), mobj.group(2))
mobj = re.search(
r'''(?sx)
<object[^>]+class=(["\'])LimelightEmbeddedPlayerFlash\1[^>]*>.*?
<param[^>]+
name=(["\'])flashVars\2[^>]+
value=(["\'])(?:(?!\3).)*mediaId=(?P<id>[a-z0-9]{32})
''', webpage)
if mobj:
return self.url_result('limelight:media:%s' % mobj.group('id'))
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?vod-platform\.net/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?admin\.mangomolo\.com/analytics/index\.php/customers/embed/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
index\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_url = LiveLeakIE._extract_url(webpage)
if liveleak_url:
return self.url_result(liveleak_url, 'LiveLeak')
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return _playlist_from_matches(dbtv_urls, ie=DBTVIE.ie_key())
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
info_dict.update({
'title': video_title or info_dict['title'],
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit
})
info_dict.update(json_ld)
return info_dict
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
for entry in entries:
entry.update({
'id': video_id,
'title': video_title,
})
self._sort_formats(entry['formats'])
return self.playlist_result(entries)
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js')
def filter_video(urls):
return list(filter(check_video, urls))
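# Illustrative behaviour of the filter above (example URLs only):
#   check_video('http://example.com/clip.mp4')  -> True  (path has a non-blacklisted extension)
#   check_video('http://example.com/thumb.jpg') -> False (images, subtitles, players are skipped)
#   check_video('https://www.youtube.com/watch?v=BaW_jenozKc') -> True (YoutubeIE.suitable)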
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
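# For reference, REDIRECT_REGEX is meant to pull the target URL out of values like
#   "0; url=http://example.com/video" or "5;URL='http://example.com/video'"
# as found in <meta http-equiv="refresh" content="..."> tags and Refresh headers.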
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
if not found:
# twitter:player is an https URL to an iframe player that may or may not
# be supported by youtube-dl, so it is checked last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to the generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
# coding=utf-8
import calendar
import time
import ujson
import yaml
from datetime import datetime
from listenbrainz.utils import escape, convert_to_unix_timestamp
def flatten_dict(d, seperator='', parent_key=''):
"""
Flattens a nested dictionary structure into a single dict.
Args:
d: the dict to be flattened
seperator: the separator inserted between nested keys in the flattened dict
parent_key: the key that is prefixed to all keys generated during flattening
Returns:
Flattened dict with keys such as key1.key2
"""
result = []
for key, value in d.items():
new_key = "{}{}{}".format(parent_key, seperator, str(key))
if isinstance(value, dict):
result.extend(list(flatten_dict(value, '.', new_key).items()))
else:
result.append((new_key, value))
return dict(result)
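# Doctest-style sketch (made-up values) of how flatten_dict behaves:
#   >>> flatten_dict({'release': {'mbid': 'xyz', 'year': 2017}, 'isrc': 'ABC'})
#   {'release.mbid': 'xyz', 'release.year': 2017, 'isrc': 'ABC'}
# Nested keys are joined with '.', while top-level keys are kept unchanged.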
def convert_comma_seperated_string_to_list(string):
if not string:
return []
return string.split(',')
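# Sketch of the helper above: 'a,b,c' -> ['a', 'b', 'c'], while '' or None -> [].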
class Listen(object):
""" Represents a listen object """
# keys that we use ourselves for private usage
PRIVATE_KEYS = (
'inserted_timestamp',
)
# keys in additional_info that we support explicitly and are not superfluous
SUPPORTED_KEYS = (
'artist_mbids',
'release_group_mbid',
'release_mbid',
'recording_mbid',
'track_mbid',
'work_mbids',
'tracknumber',
'isrc',
'spotify_id',
'tags',
'artist_msid',
'release_msid',
'recording_msid',
)
TOP_LEVEL_KEYS = (
'time',
'user_name',
'artist_name',
'track_name',
'release_name',
)
def __init__(self, user_id=None, user_name=None, timestamp=None, artist_msid=None, release_msid=None,
recording_msid=None, dedup_tag=0, inserted_timestamp=None, data=None):
self.user_id = user_id
self.user_name = user_name
# determine the type of timestamp and do the right thing
if isinstance(timestamp, int) or isinstance(timestamp, float):
self.ts_since_epoch = int(timestamp)
self.timestamp = datetime.utcfromtimestamp(self.ts_since_epoch)
else:
if timestamp:
self.timestamp = timestamp
self.ts_since_epoch = calendar.timegm(self.timestamp.utctimetuple())
else:
self.timestamp = None
self.ts_since_epoch = None
self.artist_msid = artist_msid
self.release_msid = release_msid
self.recording_msid = recording_msid
self.dedup_tag = dedup_tag
self.inserted_timestamp = inserted_timestamp
if data is None:
self.data = {'additional_info': {}}
else:
try:
data['additional_info'] = flatten_dict(data['additional_info'])
except TypeError:
# TypeError may occur here because PostgresListenStore passes strings
# to data sometimes. If that occurs, we don't need to do anything.
pass
self.data = data
@classmethod
def from_json(cls, j):
"""Factory to make Listen() objects from a dict"""
if 'playing_now' in j:
j.update({'listened_at': None})
else:
j['listened_at'] = datetime.utcfromtimestamp(float(j['listened_at']))
return cls(
user_id=j.get('user_id'),
user_name=j.get('user_name', ''),
timestamp=j['listened_at'],
artist_msid=j['track_metadata']['additional_info'].get('artist_msid'),
release_msid=j['track_metadata']['additional_info'].get('release_msid'),
recording_msid=j.get('recording_msid'),
dedup_tag=j.get('dedup_tag', 0),
data=j.get('track_metadata')
)
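# Minimal illustration (field values are made up) of the dict from_json expects:
#   {
#       'listened_at': 1514764800,
#       'user_id': 1,
#       'user_name': 'alice',
#       'recording_msid': 'some-recording-msid',
#       'track_metadata': {
#           'artist_name': 'Some Artist',
#           'track_name': 'Some Track',
#           'additional_info': {'artist_msid': 'some-artist-msid', 'release_msid': 'some-release-msid'}
#       }
#   }
# When a 'playing_now' key is present, listened_at is forced to None instead.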
@classmethod
def from_influx(cls, row):
""" Factory to make Listen objects from an influx row
"""
t = convert_to_unix_timestamp(row['time'])
data = {
'release_msid': row.get('release_msid'),
'release_mbid': row.get('release_mbid'),
'recording_mbid': row.get('recording_mbid'),
'release_group_mbid': row.get('release_group_mbid'),
'artist_mbids': convert_comma_seperated_string_to_list(row.get('artist_mbids', '')),
'tags': convert_comma_seperated_string_to_list(row.get('tags', '')),
'work_mbids': convert_comma_seperated_string_to_list(row.get('work_mbids', '')),
'isrc': row.get('isrc'),
'spotify_id': row.get('spotify_id'),
'tracknumber': row.get('tracknumber'),
'track_mbid': row.get('track_mbid'),
}
# The influx row can contain many fields that are user-generated.
# We only need to add those fields which have some value in them to additional_info.
# Also, we need to make sure that we don't add fields like time, user_name etc. into
# the additional_info.
for key, value in row.items():
if key not in data and key not in Listen.TOP_LEVEL_KEYS + Listen.PRIVATE_KEYS and value is not None:
try:
value = ujson.loads(value)
data[key] = value
continue
except (ValueError, TypeError):
pass
# there are some lists in the database that were converted to string
# via str(list) so they can't be loaded via json.
# Example: "['Blank & Jones']"
# However, yaml parses them safely and correctly
try:
value = yaml.safe_load(value)
data[key] = value
continue
                except Exception:  # yaml ScannerError/ParserError or anything else unexpected
pass
data[key] = value
return cls(
timestamp=t,
user_name=row.get('user_name'),
artist_msid=row.get('artist_msid'),
recording_msid=row.get('recording_msid'),
release_msid=row.get('release_msid'),
inserted_timestamp=row.get('inserted_timestamp'),
data={
'additional_info': data,
'artist_name': row.get('artist_name'),
'track_name': row.get('track_name'),
'release_name': row.get('release_name'),
}
)
def to_api(self):
"""
Converts listen into the format in which listens are returned in the payload by the api
on get_listen requests
Returns:
dict with fields 'track_metadata', 'listened_at' and 'recording_msid'
"""
track_metadata = self.data.copy()
track_metadata['additional_info']['artist_msid'] = self.artist_msid
track_metadata['additional_info']['release_msid'] = self.release_msid
data = {
'track_metadata': track_metadata,
'listened_at': self.ts_since_epoch,
'recording_msid': self.recording_msid,
'user_name': self.user_name,
}
return data
def to_json(self):
return {
'user_id': self.user_id,
'user_name': self.user_name,
'timestamp': self.timestamp,
'track_metadata': self.data,
'recording_msid': self.recording_msid
}
def to_influx(self, measurement):
"""
Converts listen into dict that can be submitted to influx directly.
Returns:
a dict with appropriate values of measurement, time, tags and fields
"""
if 'tracknumber' in self.data['additional_info']:
try:
tracknumber = int(self.data['additional_info']['tracknumber'])
except (ValueError, TypeError):
tracknumber = None
else:
tracknumber = None
data = {
'measurement' : measurement,
'time' : self.ts_since_epoch,
'fields' : {
'user_name' : escape(self.user_name),
'artist_name' : self.data['artist_name'],
'artist_msid' : self.artist_msid,
'artist_mbids' : ",".join(self.data['additional_info'].get('artist_mbids', [])),
'release_name' : self.data.get('release_name', ''),
'release_msid' : self.release_msid,
'release_mbid' : self.data['additional_info'].get('release_mbid', ''),
'track_name' : self.data['track_name'],
'recording_msid' : self.recording_msid,
'recording_mbid' : self.data['additional_info'].get('recording_mbid', ''),
'tags' : ",".join(self.data['additional_info'].get('tags', [])),
'release_group_mbid': self.data['additional_info'].get('release_group_mbid', ''),
'track_mbid': self.data['additional_info'].get('track_mbid', ''),
'work_mbids': ','.join(self.data['additional_info'].get('work_mbids', [])),
'tracknumber': tracknumber,
'isrc': self.data['additional_info'].get('isrc', ''),
'spotify_id': self.data['additional_info'].get('spotify_id', ''),
'inserted_timestamp': int(time.time()),
}
}
# if we need a dedup tag, then add it to the row
if self.dedup_tag > 0:
data['tags'] = {'dedup_tag': self.dedup_tag}
# add the user generated keys present in additional info to fields
for key, value in self.data['additional_info'].items():
if key in Listen.PRIVATE_KEYS:
continue
if key not in Listen.SUPPORTED_KEYS:
data['fields'][key] = ujson.dumps(value)
return data
def validate(self):
return (self.user_id is not None and self.timestamp is not None and self.artist_msid is not None
and self.recording_msid is not None and self.data is not None)
@property
def date(self):
return self.timestamp
def __repr__(self):
from pprint import pformat
return pformat(vars(self))
def __unicode__(self):
return "<Listen: user_name: %s, time: %s, artist_msid: %s, release_msid: %s, recording_msid: %s, artist_name: %s, track_name: %s>" % \
(self.user_name, self.ts_since_epoch, self.artist_msid, self.release_msid, self.recording_msid, self.data['artist_name'], self.data['track_name'])
def convert_influx_row_to_spark_row(row):
return {
'listened_at': str(row['time']),
'user_name': row['user_name'],
'artist_msid': row['artist_msid'],
'artist_name': row['artist_name'],
'artist_mbids': convert_comma_seperated_string_to_list(row.get('artist_mbids', '')),
'release_msid': row.get('release_msid'),
'release_name': row.get('release_name', ''),
'release_mbid': row.get('release_mbid', ''),
'track_name': row['track_name'],
'recording_msid': row['recording_msid'],
'recording_mbid': row.get('recording_mbid', ''),
'tags': convert_comma_seperated_string_to_list(row.get('tags', [])),
}
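# A minimal usage sketch for the Listen class above (all field values are
# illustrative placeholders, not real MessyBrainz/MusicBrainz identifiers):
if __name__ == "__main__":
    example_json = {
        'listened_at': 1514764800,
        'user_name': 'example_user',
        'recording_msid': 'example-recording-msid',
        'track_metadata': {
            'artist_name': 'Example Artist',
            'track_name': 'Example Track',
            'additional_info': {},
        },
    }
    example_listen = Listen.from_json(example_json)
    print(example_listen.to_api()['listened_at'])  # -> 1514764800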
|
# Environment.py - Handles names and such
import Data.Data;
import Data.Lang;
import Data.Type;
import Data.Value;
environmentType = Data.Type.Type("Environment");
# somewhere between a scope and a namespace
class Environment(Data.Data.DataValue):
def __init__(self, parent, name):
Data.Data.DataValue.__init__(self, environmentType, name);
self._set("vars", Data.Value.Dict({}));
self._set("parent", parent);
# add a reference to ourselves in our parent
if self._get("parent") != None:
self._get("parent").SetVariable(Data.Value.Variable(str(self._get("name"))), self);
def Evaluate(self):
return self;
def Access(self, name):
if Data.Value.variableType.IsSubtype(name._get("type")):
return self.GetVariable(name);
else:
Data.Data.DataValue.Access(self, name);
def Insert(self, name, value):
if Data.Value.variableType.IsSubtype(name._get("type")):
self.SetVariable(name, value);
else:
Data.Data.DataValue.Insert(self, name, value);
def GetVariable(self, name):
if name in self._get("vars"):
return self._get("vars").Access(name);
if self._get("parent") == None:
raise Exception("Unset variable '{}'".format(repr(name)));
return self._get("parent").GetVariable(name);
def HasVariable(self, name):
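        # Note: this walks straight up to the root environment and only checks the
        # root's vars, so it effectively asks "is this name defined globally?".
        # SetVariable relies on that to decide whether an assignment should target
        # the global scope rather than create a new local binding.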
if self._get("parent") == None:
return name._value in self._get("vars");
else:
return self._get("parent").HasVariable(name);
def SetVariable(self, name, value, forceHere = False):
if forceHere or name._value in self._get("vars") or not (self._get("parent") != None and self._get("parent").HasVariable(name)):
value._set("name", name);
self._get("vars").Insert(name, value);
else:
self._get("parent").SetVariable(name, value);
def __str__(self):
return "<Environment> " + str(self._get("name")) + ": [" + ", ".join(map(str, self._get("vars")._value)) + "]";
|
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QItemSelectionRange(): # skipped bases: <type 'sip.simplewrapper'>
"""
QItemSelectionRange()
QItemSelectionRange(QItemSelectionRange)
QItemSelectionRange(QModelIndex, QModelIndex)
QItemSelectionRange(QModelIndex)
"""
def bottom(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.bottom() -> int """
return 0
def bottomRight(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.bottomRight() -> QModelIndex """
pass
def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QItemSelectionRange.contains(QModelIndex) -> bool
QItemSelectionRange.contains(int, int, QModelIndex) -> bool
"""
return False
def height(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.height() -> int """
return 0
def indexes(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.indexes() -> list-of-QModelIndex """
pass
def intersect(self, QItemSelectionRange): # real signature unknown; restored from __doc__
""" QItemSelectionRange.intersect(QItemSelectionRange) -> QItemSelectionRange """
return QItemSelectionRange
def intersected(self, QItemSelectionRange): # real signature unknown; restored from __doc__
""" QItemSelectionRange.intersected(QItemSelectionRange) -> QItemSelectionRange """
return QItemSelectionRange
def intersects(self, QItemSelectionRange): # real signature unknown; restored from __doc__
""" QItemSelectionRange.intersects(QItemSelectionRange) -> bool """
return False
def isEmpty(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.isEmpty() -> bool """
return False
def isValid(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.isValid() -> bool """
return False
def left(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.left() -> int """
return 0
def model(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.model() -> QAbstractItemModel """
pass
def parent(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.parent() -> QModelIndex """
pass
def right(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.right() -> int """
return 0
def top(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.top() -> int """
return 0
def topLeft(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.topLeft() -> QModelIndex """
pass
def width(self): # real signature unknown; restored from __doc__
""" QItemSelectionRange.width() -> int """
return 0
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
|
from crysp.bits import *
# from nilsimsa 0.2.4 archive:
# Nilsimsa uses eight sets of character separations (adjacent characters,
# characters one apart, and so on), takes the three characters and performs a computation on them:
# (((tran[((a)+(n))&255]^tran[(b)]*((n)+(n)+1))+tran[(c)^tran[n]])&255),
# where a, b, and c are the characters, n is 0-7 indicating which separation,
# and tran is a permutation of [0-255].
# The result is a byte, and nilsimsa throws all these bytes from all eight
# separations into one histogram and encodes the histogram.
class Nilsimsa(object):
def __init__(self,target=None):
if target is None: target=53
self.tran = self.maketran(target)
self.reset()
def reset(self):
self.count = 0
self.dacc = [0]*256
self.seen = [None]*4
def update(self,data):
if isinstance(data,str): data = map(ord,data)
for b in data:
w3,w2,w1,w0 = self.seen[-4:]
self.count += 1
if w1!=None:
self.dacc[self.tran3(b,w0,w1,0)] += 1
if w2!=None:
self.dacc[self.tran3(b,w0,w2,1)] += 1
self.dacc[self.tran3(b,w1,w2,2)] += 1
if w3!=None:
self.dacc[self.tran3(b,w0,w3,3)] += 1
self.dacc[self.tran3(b,w1,w3,4)] += 1
self.dacc[self.tran3(b,w2,w3,5)] += 1
#
self.dacc[self.tran3(w3,w0,b,6)] += 1
self.dacc[self.tran3(w3,w2,b,7)] += 1
self.seen.append(b)
return self
def digest(self):
total = 0
if self.count == 3:
total = 1
elif self.count == 4:
total = 4
elif self.count>4:
total = 8*self.count - 28
thres = total//256
code = [0]*32
for i in range(256):
if self.dacc[i]>thres:
code[i>>3] += 1<<(i&7)
self.reset()
return bytes(bytearray(code[::-1]))
def __call__(self,data):
return self.update(data).digest()
def tran3(self,a,b,c,n):
return (((self.tran[(a+n)&255]^self.tran[b]*(n+n+1))+self.tran[c^self.tran[n]])&255)
def maketran(self,target):
T = [0]*256
j=0
for i in range(256):
j = (j*target+1)&255
j += j
if j>255: j-=255
k = 0
while k<i:
if T[k]==j:
j = (j+1)&255
k = 0
k+=1
T[i] = j
return T
def distance(h1,h2):
return Bits(h1).hd(h2)
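# A minimal usage sketch (assumes crysp's Bits.hd, imported above, returns the
# bitwise Hamming distance between two 32-byte digests; input strings are only
# illustrative):
if __name__ == "__main__":
    n = Nilsimsa()
    d1 = n("The quick brown fox jumps over the lazy dog")
    d2 = n("The quick brown fox jumps over the lazy cog")
    print(len(d1), distance(d1, d2))  # similar inputs give a small distance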
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Simple Function Graph Plotter
© Thomas Führinger, Sam Tygier 2005-2020
https://github.com/thomasfuhringer/lybniz
Version 3.0.6
Requires PyGObject 3
Released under the terms of the revised BSD license
Modified: 2020-02-03
"""
import sys, os, cairo, gettext, configparser
from math import *
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk, GObject, Pango, Gio, GdkPixbuf
from pathlib import Path
app_version = "3.0.6"
gettext.install("lybniz")
# profiling
enable_profiling = False
if enable_profiling:
from time import time
app_win = None
actions = Gtk.ActionGroup(name="General")
graph = None
connect_points = True
configFile = os.path.expanduser("~/.lybniz.cfg")
config = configparser.ConfigParser()
x_res = 1
x_max = "5.0"
x_min = "-5.0"
x_scale = "1.0"
y_max = "3.0"
y_min = "-3.0"
y_scale = "1.0"
y1 = "sin(x)"
y2 = ""
y3 = ""
# some extra maths functions
def fac(x):
if type(x) != int or x < 0:
raise ValueError
if x==0:
return 1
for n in range(2,x):
x = x*n
return x
def sinc(x):
if x == 0:
return 1
return sin(x)/x
# create a safe namespace for the eval()s in the graph drawing code
def sub_dict(somedict, somekeys, default=None):
return dict([ (k, somedict.get(k, default)) for k in somekeys ])
# a list of the functions from math that we want.
safe_list = ['math', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'floor', 'fmod', 'frexp', 'gamma', 'lgamma', 'hypot', 'ldexp', 'log', 'log2', 'log1p', 'log10', 'modf', 'pi', 'tau', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'factorial']
safe_dict = sub_dict(locals(), safe_list)
#add any needed builtins back in.
safe_dict['abs'] = abs
safe_dict['min'] = min
safe_dict['max'] = max
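# The dict above is passed as the locals for every eval() in the plotting code,
# with builtins removed; e.g. the drawing loop effectively does:
#   safe_dict['x'] = 1.0
#   eval(compile("sin(x)", "", "eval"), {"__builtins__": {}}, safe_dict)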
def marks(min_val,max_val,minor=1):
"Yield positions of scale marks between min and max. For making minor marks, set minor to the number of minors you want between majors"
try:
min_val = float(min_val)
max_val = float(max_val)
    except (TypeError, ValueError):
        print("marks() needs two numeric bounds")
        raise ValueError
    if min_val >= max_val:
        print("min must be smaller than max")
        raise ValueError
a = 0.2 # tweakable control for when to switch scales
# big a value results in more marks
a = a + log10(minor)
width = max_val - min_val
log10_range = log10(width)
interval = 10 ** int(floor(log10_range - a))
lower_mark = min_val - fmod(min_val,interval)
if lower_mark < min_val:
lower_mark += interval
a_mark = lower_mark
while a_mark <= max_val:
if abs(a_mark) < interval / 2:
a_mark = 0
yield a_mark
a_mark += interval
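# For example, list(marks(0, 10)) yields the major marks 0, 1, ..., 10
# (for a width of 10 the heuristic above picks an interval of 1).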
class GraphClass:
def __init__(self):
def da_configure_event(widget, event):
global x_max, x_min, x_scale, y_max, y_min, y_scale, y1, y2, y3
x_max = app_win.x_max_entry.get_text()
x_min = app_win.x_min_entry.get_text()
x_scale = app_win.x_scale_entry.get_text()
y_max = app_win.y_max_entry.get_text()
y_min = app_win.y_min_entry.get_text()
y_scale = app_win.y_scale_entry.get_text()
y1 = app_win.y1_entry.get_text()
y2 = app_win.y2_entry.get_text()
y3 = app_win.y3_entry.get_text()
gdkWindow = widget.get_window()
width = widget.get_allocated_width()
height = widget.get_allocated_height()
self.surface = gdkWindow.create_similar_surface(cairo.CONTENT_COLOR, width, height)
self.layout = Pango.Layout(widget.create_pango_context())
self.canvas_width = width
self.canvas_height = height
self.x_max = eval(x_max,{"__builtins__":{}},safe_dict)
self.x_min = eval(x_min,{"__builtins__":{}},safe_dict)
self.x_scale = eval(x_scale,{"__builtins__":{}},safe_dict)
self.y_max = eval(y_max,{"__builtins__":{}},safe_dict)
self.y_min = eval(y_min,{"__builtins__":{}},safe_dict)
self.y_scale = eval(y_scale,{"__builtins__":{}},safe_dict)
self.previousMouseX = 0
self.previousMouseY = 0
self.plot()
return True
# Redraw the screen from the backing pixmap
def da_draw_event(widget, cr):
cr.set_source_surface(self.surface, 0, 0)
cr.paint()
return False
# Start marking selection
def button_press_event(widget, event):
global x_sel, y_sel
if event.button == 1:
self.selection[0][0], self.selection[0][1] = int(event.x), int(event.y)
self.selection[1][0], self.selection[1][1] = None, None
# duplicate surface
self.clean_surface = self.surface.create_similar(cairo.CONTENT_COLOR, self.canvas_width, self.canvas_height)
crc = cairo.Context(self.clean_surface)
crc.set_source_surface(self.surface, 0, 0)
crc.paint()
del crc
# End of selection
def da_button_release_event(widget, event):
if event.button == 1 and event.x != self.selection[0][0] and event.y != self.selection[0][1]:
xmi, ymi = min(self.graph_x(self.selection[0][0]), self.graph_x(event.x)), min(self.graph_y(self.selection[0][1]), self.graph_y(event.y))
xma, yma = max(self.graph_x(self.selection[0][0]), self.graph_x(event.x)), max(self.graph_y(self.selection[0][1]), self.graph_y(event.y))
self.x_min, self.y_min, self.x_max, self.y_max = xmi, ymi, xma, yma
parameter_entries_repopulate()
self.plot()
self.selection[1][0] = None
self.selection[0][0] = None
# Draw rectangle during mouse movement
def da_motion_notify_event(widget, event):
if event.is_hint:
dummy, x, y, state = event.window.get_device_position(event.device)
else:
x = event.x
y = event.y
state = event.get_state()
if state & Gdk.ModifierType.BUTTON1_MASK and self.selection[0][0] is not None:
cr = cairo.Context(self.surface)
cr.set_source_surface(self.clean_surface, 0, 0)
cr.paint()
x0 = min(self.selection[0][0], int(x))
y0 = min(self.selection[0][1], int(y))
w = abs(int(x) - self.selection[0][0])
h = abs(int(y) - self.selection[0][1])
self.selection[1][0], self.selection[1][1] = int(x), int(y)
cr.set_source_rgb(0.3, 0.3, 0.3)
cr.set_line_width (0.5)
cr.rectangle(x0, y0, w, h)
cr.stroke()
del cr
widget.queue_draw()
elif state & Gdk.ModifierType.BUTTON2_MASK:
dx = event.x - self.previousMouseX
dy = event.y - self.previousMouseY
dx = dx / self.canvas_width * (self.x_max - self.x_min)
dy = dy / self.canvas_height * (self.y_max - self.y_min)
self.x_min -= dx; self.x_max -= dx
self.y_min += dy; self.y_max += dy
parameter_entries_repopulate()
graph.plot()
self.previousMouseX = event.x
self.previousMouseY = event.y
def da_scroll_event(widget, event):
if event.direction == Gdk.ScrollDirection.UP:
zoom_in(None)
elif event.direction == Gdk.ScrollDirection.DOWN:
zoom_out(None)
self.prev_y = [None, None, None]
# Marked area point[0, 1][x, y]
self.selection = [[None, None], [None, None]]
self.drawing_area = Gtk.DrawingArea()
self.drawing_area.connect("draw", da_draw_event)
self.drawing_area.connect("configure_event", da_configure_event)
self.drawing_area.connect("button_press_event", button_press_event)
self.drawing_area.connect("button_release_event", da_button_release_event)
self.drawing_area.connect("motion_notify_event", da_motion_notify_event)
self.drawing_area.connect("scroll-event", da_scroll_event)
self.drawing_area.set_events(Gdk.EventMask.EXPOSURE_MASK | Gdk.EventMask.LEAVE_NOTIFY_MASK | Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.SCROLL_MASK)
self.scale_style = "dec"
def plot(self):
cr = cairo.Context(self.surface)
cr.set_source_rgb(1, 1, 1)
cr.paint()
cr.set_source_rgb(0, 0, 0)
cr.set_line_width (0.2)
app_win.status_bar.remove_all(0)
if (self.scale_style == "cust"):
#draw cross
cr.rectangle(self.canvas_x(0), 0, 0.2, self.canvas_height)
cr.rectangle(0, self.canvas_y(0), self.canvas_width, 0.2)
cr.stroke()
# old style axis marks
iv = self.x_scale * self.canvas_width / (self.x_max - self.x_min) # pixel interval between marks
os = self.canvas_x(0) % iv # pixel offset of first mark
# loop over each mark.
for i in range(int(self.canvas_width / iv + 1)):
# multiples of iv, cause adding of any error in iv, so keep iv as float
# use round(), to get to closest pixel, int() to prevent warning
cr.rectangle(os + i * iv, self.canvas_y(0) - 5, 0.2, 11)
cr.stroke()
# and the y-axis
iv = self.y_scale * self.canvas_height / (self.y_max - self.y_min)
os = self.canvas_y(0) % iv
for i in range(int(self.canvas_height / iv + 1)):
cr.rectangle(self.canvas_x(0) - 5, i * iv + os, 11, 0.2)
cr.stroke()
else:
# new style
factor = 1
if (self.scale_style == "rad"): factor = pi
if (self.scale_style == "tau"): factor = 2 * pi
# where to put the numbers
numbers_x_pos = -10
numbers_y_pos = 10
# where to center the axis
center_x_pix = int(round(self.canvas_x(0)))
center_y_pix = int(round(self.canvas_y(0)))
            if (center_x_pix < 5): center_x_pix = 5
            if (center_x_pix < 20): numbers_x_pos = 10
            if (center_y_pix < 5): center_y_pix = 5
            if (center_x_pix > self.canvas_width - 5): center_x_pix = self.canvas_width - 5
            if (center_y_pix > self.canvas_height - 5): center_y_pix = self.canvas_height - 5
            if (center_y_pix > self.canvas_height - 20): numbers_y_pos = -10
# draw cross
cr.rectangle(center_x_pix, 0, 0.1, self.canvas_height)
cr.rectangle(0, center_y_pix, self.canvas_width, 0.1)
cr.stroke()
for i in marks(self.x_min / factor, self.x_max / factor):
label = '%g' % i
if (self.scale_style == "rad"): label += " π"
if (self.scale_style == "tau"): label += " τ"
i = i * factor
cr.rectangle(self.canvas_x(i), center_y_pix - 5, 0.2, 11)
cr.stroke()
if (numbers_y_pos < 0):
adjust = cr.text_extents(label)[3]
else:
adjust = 0
cr.move_to(int(round(self.canvas_x(i))), center_y_pix + numbers_y_pos - adjust + 7)
cr.show_text(label)
for i in marks(self.y_min,self.y_max):
label = '%g' % i
cr.rectangle(center_x_pix - 5, self.canvas_y(i), 11, 0.2)
cr.stroke()
if (numbers_x_pos < 0):
adjust = cr.text_extents(label)[3]
else:
adjust = 0
cr.move_to(center_x_pix + numbers_x_pos - adjust, int(round(self.canvas_y(i))) + 7)
cr.show_text(label)
# minor marks
for i in marks(self.x_min / factor, self.x_max / factor, minor=10):
i = i * factor
cr.rectangle(self.canvas_x(i), center_y_pix - 2, 0.2, 5)
cr.stroke()
for i in marks(self.y_min, self.y_max, minor=10):
label = '%g' % i
cr.rectangle(center_x_pix - 2, self.canvas_y(i), 5, 0.2)
cr.stroke()
plots = []
# precompile the functions
invalid_input = False
if y1:
try:
compiled_y1 = compile(y1.replace("^","**"), "", 'eval')
plots.append((compiled_y1, 0, (0, 0, 1), y1))
except:
set_statusbar(_("Function") + " '" + y1 + "' " + _("is invalid."))
invalid_input = True
compiled_y1 = None
else:
compiled_y1 = None
if y2:
try:
compiled_y2 = compile(y2.replace("^","**"),"",'eval')
plots.append((compiled_y2, 1, (1, 0, 0), y2))
except:
set_statusbar(_("Function") + " '" + y2 + "' " + _("is invalid."))
invalid_input = True
compiled_y2 = None
else:
compiled_y2 = None
if y3:
try:
compiled_y3 = compile(y3.replace("^","**"), "", 'eval')
plots.append((compiled_y3, 2, (0, 1, 0), y3))
except:
set_statusbar(_("Function") + " '" + y3 + "' " + _("is invalid."))
invalid_input = True
compiled_y3 = None
else:
compiled_y3 = None
self.prev_y = [None, None, None]
if enable_profiling:
start_graph = time()
cr.set_line_width (0.6)
if len(plots) != 0:
for i in range(-1, self.canvas_width, x_res):
x = self.graph_x(i + 1)
for e in plots:
safe_dict['x']=x
try:
y = eval(e[0],{"__builtins__":{}},safe_dict)
y_c = int(round(self.canvas_y(y)))
cr.set_source_rgb(*e[2])
if connect_points and self.prev_y[e[1]] is not None and not ((self.prev_y[e[1]] < 0 and y_c > self.canvas_height) or (y_c < 0 and self.prev_y[e[1]] > self.canvas_height)):
cr.move_to(i, self.prev_y[e[1]])
cr.line_to(i + x_res, y_c)
cr.stroke()
else:
cr.rectangle(i + x_res, y_c, 1, 1)
cr.fill()
self.prev_y[e[1]] = y_c
except:
#print ("Error at %d: %s" % (x, sys.exc_info()))
set_statusbar(_("Function") + " '" + e[3] + "' " + _("is invalid at") + " " + str(int(x)) + ".")
invalid_input = True
self.prev_y[e[1]] = None
if enable_profiling:
print ("Time to draw graph:", (time() - start_graph) * 1000, "ms")
if not invalid_input:
set_statusbar("")
del cr
self.drawing_area.queue_draw()
def canvas_x(self, x):
"Calculate position on canvas to point on graph"
return (x - self.x_min) * self.canvas_width / (self.x_max - self.x_min)
def canvas_y(self, y):
return (self.y_max - y) * self.canvas_height / (self.y_max - self.y_min)
def canvas_point(self, x, y):
return (self.canvas_x(x), self.canvas_y(y))
def graph_x(self, x):
"Calculate position on graph from point on canvas"
return x * (self.x_max - self.x_min) / self.canvas_width + self.x_min
def graph_y(self, y):
return self.y_max - (y * (self.y_max - self.y_min) / self.canvas_height)
def menu_toolbar_create():
app_win.menu_main = Gtk.MenuBar()
menu_file = Gtk.Menu()
menu_item_file = Gtk.MenuItem(label=_("_File"))
menu_item_file.set_submenu(menu_file)
menu_item_file.set_use_underline(True)
actions.save = Gtk.Action(name="Save", label=_("_Save"), tooltip=_("Save graph as bitmap"), stock_id=Gtk.STOCK_SAVE)
actions.save.connect ("activate", save)
actions.add_action(actions.save)
menu_item_save = actions.save.create_menu_item()
menu_item_save.add_accelerator("activate", app_win.accel_group, ord("S"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_file.append(menu_item_save)
actions.quit = Gtk.Action(name="Quit", label=_("_Quit"), tooltip=_("Quit Application"), stock_id=Gtk.STOCK_QUIT)
actions.quit.connect ("activate", quit_dlg)
actions.add_action(actions.quit)
menuItem_quit = actions.quit.create_menu_item()
menuItem_quit.add_accelerator("activate", app_win.accel_group, ord("Q"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_file.append(menuItem_quit)
menu_graph = Gtk.Menu()
menu_item_graph = Gtk.MenuItem(label=_("_Graph"))
menu_item_graph.set_submenu(menu_graph)
menu_item_graph.set_use_underline(True)
actions.plot = Gtk.Action(name="Plot", label=_("P_lot"), tooltip=_("Plot Functions"), stock_id=Gtk.STOCK_REFRESH)
actions.plot.connect ("activate", plot)
actions.add_action(actions.plot)
menu_item_plot = actions.plot.create_menu_item()
menu_item_plot.add_accelerator("activate", app_win.accel_group, ord("l"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_graph.append(menu_item_plot)
actions.evaluate = Gtk.Action(name="Evaluate", label=_("_Evaluate"), tooltip=_("Evaluate Functions"), stock_id=Gtk.STOCK_EXECUTE)
actions.evaluate.connect ("activate", evaluate)
actions.add_action(actions.evaluate)
menu_item_evaluate = actions.evaluate.create_menu_item()
menu_item_evaluate.add_accelerator("activate", app_win.accel_group, ord("e"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_graph.append(menu_item_evaluate)
actions.zoom_in = Gtk.Action(name="zoom_in", label=_("Zoom _In"), tooltip=_("Zoom In"), stock_id=Gtk.STOCK_ZOOM_IN)
actions.zoom_in.connect ("activate", zoom_in)
actions.add_action(actions.zoom_in)
menu_item_zoomin = actions.zoom_in.create_menu_item()
menu_item_zoomin.add_accelerator("activate", app_win.accel_group, ord("+"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_graph.append(menu_item_zoomin)
actions.zoom_out = Gtk.Action(name="zoom_out", label=_("Zoom _Out"), tooltip=_("Zoom Out"), stock_id=Gtk.STOCK_ZOOM_OUT)
actions.zoom_out.connect ("activate", zoom_out)
actions.add_action(actions.zoom_out)
menu_item_zoomout = actions.zoom_out.create_menu_item()
menu_item_zoomout.add_accelerator("activate", app_win.accel_group, ord("-"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_graph.append(menu_item_zoomout)
actions.zoom_reset = Gtk.Action(name="zoom_reset", label=_("Zoom _Reset"), tooltip=_("Zoom Reset"), stock_id=Gtk.STOCK_ZOOM_100)
actions.zoom_reset.connect ("activate", zoom_reset)
actions.add_action(actions.zoom_reset)
menu_item_zoomreset = actions.zoom_reset.create_menu_item()
menu_item_zoomreset.add_accelerator("activate", app_win.accel_group, ord("r"), Gdk.ModifierType.CONTROL_MASK, Gtk.AccelFlags.VISIBLE)
menu_graph.append(menu_item_zoomreset)
menu_item_toggle_connect = Gtk.CheckMenuItem(label=_("_Connect Points"))
menu_item_toggle_connect.set_active(True)
menu_item_toggle_connect.set_use_underline(True)
menu_item_toggle_connect.connect ("toggled", toggle_connect)
menu_graph.append(menu_item_toggle_connect)
menu_scale_style = Gtk.Menu()
menu_item_scale_style = Gtk.MenuItem(label=_("Scale Style"))
menu_item_scale_style.set_submenu(menu_scale_style)
menu_graph.append(menu_item_scale_style)
actions.dec = Gtk.Action(name="Dec", label=_("Decimal"), tooltip=_("Set style to decimal"), stock_id=None)
actions.dec.connect ("activate", scale_dec)
actions.add_action(actions.dec)
menu_item_dec = actions.dec.create_menu_item()
menu_scale_style.append(menu_item_dec)
actions.rad = Gtk.Action(name="Rad", label=_("Radians π"), tooltip=_("Set style to radians"), stock_id=None)
actions.rad.connect ("activate", scale_rad)
actions.add_action(actions.rad)
menu_item_rad = actions.rad.create_menu_item()
menu_scale_style.append(menu_item_rad)
actions.rad_tau = Gtk.Action(name="Radτ", label=_("Radians τ"), tooltip=_("Set style to radians using Tau (τ)"), stock_id=None)
actions.rad_tau.connect ("activate", scale_rad_tau)
actions.add_action(actions.rad_tau)
menu_item_rad_tau = actions.rad_tau.create_menu_item()
menu_scale_style.append(menu_item_rad_tau)
actions.cust = Gtk.Action(name="Cust", label=_("Custom"), tooltip=_("Set style to custom"), stock_id=None)
actions.cust.connect ("activate", scale_cust)
actions.add_action(actions.cust)
menu_item_cust = actions.cust.create_menu_item()
menu_scale_style.append(menu_item_cust)
menu_help = Gtk.Menu()
menu_item_help = Gtk.MenuItem(label=_("_Help"), use_underline=True)
menu_item_help.set_submenu(menu_help)
menu_item_help.set_use_underline(True)
actions.Help = Gtk.Action(name="Help", label=_("_Contents"), tooltip=_("Help Contents"), stock_id=Gtk.STOCK_HELP)
actions.Help.connect ("activate", show_yelp)
actions.add_action(actions.Help)
menu_item_contents = actions.Help.create_menu_item()
menu_item_contents.add_accelerator("activate", app_win.accel_group, Gdk.keyval_from_name("F1"), 0, Gtk.AccelFlags.VISIBLE)
menu_help.append(menu_item_contents)
actions.about = Gtk.Action(name="About", label=_("_About"), tooltip=_("About Box"), stock_id=Gtk.STOCK_ABOUT)
actions.about.connect ("activate", show_about_dialog)
actions.add_action(actions.about)
menu_item_about = actions.about.create_menu_item()
menu_help.append(menu_item_about)
app_win.menu_main.append(menu_item_file)
app_win.menu_main.append(menu_item_graph)
app_win.menu_main.append(menu_item_help)
app_win.tool_bar = Gtk.Toolbar()
app_win.tool_bar.insert(actions.plot.create_tool_item(), -1)
app_win.tool_bar.insert(actions.evaluate.create_tool_item(), -1)
app_win.tool_bar.insert(Gtk.SeparatorToolItem(), -1)
app_win.tool_bar.insert(actions.zoom_in.create_tool_item(), -1)
app_win.tool_bar.insert(actions.zoom_out.create_tool_item(), -1)
app_win.tool_bar.insert(actions.zoom_reset.create_tool_item(), -1)
def plot(widget, event=None):
global x_max, x_min, x_scale, y_max, y_min, y_scale, y1, y2, y3
x_max = app_win.x_max_entry.get_text()
x_min = app_win.x_min_entry.get_text()
x_scale = app_win.x_scale_entry.get_text()
y_max = app_win.y_max_entry.get_text()
y_min = app_win.y_min_entry.get_text()
y_scale = app_win.y_scale_entry.get_text()
graph.x_max = eval(x_max,{"__builtins__":{}},safe_dict)
graph.x_min = eval(x_min,{"__builtins__":{}},safe_dict)
graph.x_scale = eval(x_scale,{"__builtins__":{}},safe_dict)
graph.y_max = eval(y_max,{"__builtins__":{}},safe_dict)
graph.y_min = eval(y_min,{"__builtins__":{}},safe_dict)
graph.y_scale = eval(y_scale,{"__builtins__":{}},safe_dict)
y1 = app_win.y1_entry.get_text()
y2 = app_win.y2_entry.get_text()
y3 = app_win.y3_entry.get_text()
graph.plot()
def evaluate(widget, event=None):
"Evaluate a given x for the three functions"
def entry_changed(self):
for e in ((y1, dlg_win.y1_entry), (y2, dlg_win.y2_entry), (y3, dlg_win.y3_entry)):
try:
x = float(dlg_win.x_entry.get_text())
safe_dict['x']=x
e[1].set_text(str(eval(e[0].replace("^","**"),{"__builtins__":{}},safe_dict)))
except:
if len(e[0]) > 0:
e[1].set_text("Error: " + str(sys.exc_info()[1]))
else:
e[1].set_text("")
def close(self):
dlg_win.destroy()
dlg_win = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
dlg_win.set_title(_("Evaluate"))
dlg_win.connect("destroy", close)
dlg_win.x_entry = Gtk.Entry()
dlg_win.x_entry.set_size_request(200, 24)
dlg_win.x_entry.connect("changed", entry_changed)
dlg_win.y1_entry = Gtk.Entry()
dlg_win.y1_entry.set_size_request(200, 24)
dlg_win.y1_entry.set_sensitive(False)
dlg_win.y2_entry = Gtk.Entry()
dlg_win.y2_entry.set_size_request(200, 24)
dlg_win.y2_entry.set_sensitive(False)
dlg_win.y3_entry = Gtk.Entry()
dlg_win.y3_entry.set_size_request(200, 24)
dlg_win.y3_entry.set_sensitive(False)
grid = Gtk.Grid()
grid.set_property("row-spacing", 2)
grid.set_property("column-spacing", 10)
grid.set_border_width(24)
label = Gtk.Label(label="x = ")
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
grid.add(label)
grid.attach(dlg_win.x_entry, 1, 0, 1, 1)
label = Gtk.Label(label="y1 = ")
label.set_halign(Gtk.Align.START)
label.set_valign(Gtk.Align.CENTER)
label.get_style_context().add_class("y1")
grid.attach(label, 0, 1, 1, 1)
grid.attach(dlg_win.y1_entry, 1, 1, 1, 1)
label = Gtk.Label(label="y2 = ")
label.set_halign(Gtk.Align.START)
label.set_valign(Gtk.Align.CENTER)
label.get_style_context().add_class("y2")
grid.attach(label, 0, 2, 1, 1)
grid.attach(dlg_win.y2_entry, 1, 2, 1, 1)
label = Gtk.Label(label="y3 = ")
label.set_halign(Gtk.Align.START)
label.set_valign(Gtk.Align.CENTER)
label.get_style_context().add_class("y3")
grid.attach(label, 0, 3, 1, 1)
grid.attach(dlg_win.y3_entry, 1, 3, 1, 1)
dlg_win.add(grid)
dlg_win.show_all()
def zoom_in(widget, event=None):
"Narrow the plotted section by half"
center_x = (graph.x_min + graph.x_max) / 2
center_y = (graph.y_min + graph.y_max) / 2
range_x = (graph.x_max - graph.x_min)
range_y = (graph.y_max - graph.y_min)
graph.x_min = center_x - (range_x / 4)
graph.x_max = center_x + (range_x / 4)
graph.y_min = center_y - (range_y / 4)
    graph.y_max = center_y + (range_y / 4)
parameter_entries_repopulate()
graph.plot()
def zoom_out(widget, event=None):
"Double the plotted section"
center_x = (graph.x_min + graph.x_max) / 2
center_y = (graph.y_min + graph.y_max) / 2
range_x = (graph.x_max - graph.x_min)
range_y = (graph.y_max - graph.y_min)
graph.x_min = center_x - (range_x)
graph.x_max = center_x + (range_x)
graph.y_min = center_y - (range_y)
    graph.y_max = center_y + (range_y)
parameter_entries_repopulate()
graph.plot()
def zoom_reset(widget, event=None):
"Set the range back to the user's input"
graph.x_min = eval(x_min,{"__builtins__":{}},safe_dict)
graph.y_min = eval(y_min,{"__builtins__":{}},safe_dict)
graph.x_max = eval(x_max,{"__builtins__":{}},safe_dict)
graph.y_max = eval(y_max,{"__builtins__":{}},safe_dict)
parameter_entries_populate()
graph.plot()
def scale_dec(widget, event=None):
graph.scale_style = "dec"
app_win.scale_grid.hide()
plot(None)
def scale_rad(widget, event=None):
graph.scale_style = "rad"
app_win.scale_grid.hide()
plot(None)
def scale_rad_tau(widget, event=None):
graph.scale_style = "tau"
app_win.scale_grid.hide()
plot(None)
def scale_cust(widget, event=None):
graph.scale_style = "cust"
app_win.scale_grid.show()
plot(None)
def toggle_connect(widget, event=None):
"Toggle between a graph that connects points with lines and one that does not"
global connect_points
connect_points = not connect_points
graph.plot()
def save(widget, event=None):
"Save graph as .png"
file_dialog = Gtk.FileChooserDialog(_("Save as..."), app_win, Gtk.FileChooserAction.SAVE, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
file_dialog.set_default_response(Gtk.ResponseType.OK)
filter = Gtk.FileFilter()
filter.add_mime_type("image/png")
filter.add_pattern("*.png")
file_dialog.add_filter(filter)
file_dialog.set_filename("FunctionGraph.png")
response = file_dialog.run()
if response == Gtk.ResponseType.OK:
width = graph.drawing_area.get_allocated_width()
height = graph.drawing_area.get_allocated_height()
        # copy the rendered cairo surface into a pixbuf and write it out as PNG
        pix_buffer = Gdk.pixbuf_get_from_surface(graph.surface, 0, 0, width, height)
        pix_buffer.savev(file_dialog.get_filename(), "png", [], [])
file_dialog.destroy()
def set_statusbar(text):
app_win.status_bar.remove_all(0)
app_win.status_bar.push(0, text)
def quit_dlg(widget, event=None):
global config
width, height = app_win.get_size()
config["MainWindow"]["width"] = str(width)
config["MainWindow"]["height"] = str(height)
x, y = app_win.get_position()
config["MainWindow"]["x"] = str(x)
config["MainWindow"]["y"] = str(y)
with open(configFile, "w") as file:
config.write(file)
app_win.destroy()
def show_yelp(widget):
try:
xml_file = Path("doc/lybniz.xml")
if xml_file.is_file():
os.system("yelp doc/lybniz.xml")
else:
xml_file = Path("/usr/share/gnome/help/lybniz/C/lybniz.xml")
if xml_file.is_file():
os.system("yelp /usr/share/gnome/help/lybniz/C/lybniz.xml")
#os.system("yelp /usr/share/gnome/help/lybniz/C/lybniz.xml")
#Gtk.show_uri(None, "lybniz", 0)
except:
print ("Can't Show help")
def show_about_dialog(widget):
about_dialog = Gtk.AboutDialog(transient_for=app_win, modal=True)
about_dialog.set_program_name("Lybniz")
about_dialog.set_version(str(app_version))
about_dialog.set_authors(["Thomas Führinger","Sam Tygier"])
about_dialog.set_comments(_("Function Graph Plotter"))
about_dialog.set_license("BSD")
about_dialog.set_website("https://github.com/thomasfuhringer/lybniz")
about_dialog.set_logo(lybniz_icon)
about_dialog.connect ("response", lambda d, r: d.destroy())
about_dialog.present()
def parameter_entries_create():
# create text entries for parameters
app_win.y1_entry = Gtk.Entry()
app_win.y1_entry.set_size_request(300, 24)
app_win.y1_entry.set_hexpand(True)
app_win.y2_entry = Gtk.Entry()
app_win.y3_entry = Gtk.Entry()
app_win.y3_entry.set_size_request(300, 24)
app_win.x_min_entry = Gtk.Entry()
app_win.x_min_entry.set_width_chars(3)
app_win.x_min_entry.set_size_request(140, 24)
app_win.x_min_entry.set_alignment(xalign=1)
app_win.x_max_entry = Gtk.Entry()
app_win.x_max_entry.set_width_chars(3)
app_win.x_max_entry.set_size_request(140, 24)
app_win.x_max_entry.set_alignment(xalign=1)
app_win.x_scale_entry = Gtk.Entry()
app_win.x_scale_entry.set_width_chars(3)
app_win.x_scale_entry.set_size_request(140, 24)
app_win.x_scale_entry.set_alignment(xalign=1)
app_win.y_min_entry = Gtk.Entry()
app_win.y_min_entry.set_width_chars(3)
app_win.y_min_entry.set_size_request(140, 24)
app_win.y_min_entry.set_alignment(xalign=1)
app_win.y_max_entry = Gtk.Entry()
app_win.y_max_entry.set_width_chars(3)
app_win.y_max_entry.set_size_request(140, 24)
app_win.y_max_entry.set_alignment(xalign=1)
app_win.y_scale_entry = Gtk.Entry()
app_win.y_scale_entry.set_width_chars(3)
app_win.y_scale_entry.set_size_request(140, 24)
app_win.y_scale_entry.set_alignment(xalign=1)
parameter_entries_populate()
app_win.y1_entry.connect("key-press-event", key_press_plot)
app_win.y2_entry.connect("key-press-event", key_press_plot)
app_win.y3_entry.connect("key-press-event", key_press_plot)
app_win.x_min_entry.connect("key-press-event", key_press_plot)
app_win.y_min_entry.connect("key-press-event", key_press_plot)
app_win.x_max_entry.connect("key-press-event", key_press_plot)
app_win.y_max_entry.connect("key-press-event", key_press_plot)
app_win.x_scale_entry.connect("key-press-event", key_press_plot)
app_win.y_scale_entry.connect("key-press-event", key_press_plot)
app_win.scale_grid = Gtk.Grid()
app_win.scale_grid.set_property("column-spacing", 10)
grid = Gtk.Grid()
grid.set_property("row-spacing", 2)
grid.set_property("column-spacing", 10)
grid.set_border_width(6)
label = Gtk.Label(label="y1 = ")
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
label.get_style_context().add_class("y1")
grid.add(label)
grid.attach(app_win.y1_entry, 1, 0, 1, 1)
label = Gtk.Label(label=_("X min"))
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
grid.attach(label, 2, 0, 1, 1)
grid.attach(app_win.x_min_entry, 3, 0, 1, 1)
label = Gtk.Label(label=_("Y min"))
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
grid.attach(label, 4, 0, 1, 1)
grid.attach(app_win.y_min_entry, 5, 0, 1, 1)
label = Gtk.Label(label="y2 = ")
label.set_valign(Gtk.Align.CENTER)
label.get_style_context().add_class("y2")
grid.attach(label, 0, 1, 1, 1)
grid.attach(app_win.y2_entry, 1, 1, 1, 1)
label = Gtk.Label(label=_("X max"))
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
grid.attach(label, 2, 1, 1, 1)
grid.attach(app_win.x_max_entry, 3, 1, 1, 1)
label = Gtk.Label(label=_("Y max"))
label.set_valign(Gtk.Align.CENTER)
label.set_halign(Gtk.Align.START)
grid.attach(label, 4, 1, 1, 1)
grid.attach(app_win.y_max_entry, 5, 1, 1, 1)
label = Gtk.Label(label="y3 = ")
label.set_valign(Gtk.Align.CENTER)
label.get_style_context().add_class("y3")
grid.attach(label, 0, 2, 1, 1)
grid.attach(app_win.y3_entry, 1, 2, 1, 1)
label = Gtk.Label(label=_("X scale"))
label.set_valign(Gtk.Align.CENTER)
app_win.scale_grid.add(label)
app_win.scale_grid.attach(app_win.x_scale_entry, 1, 0, 1, 1)
label = Gtk.Label(label=_("Y scale"))
label.set_valign(Gtk.Align.CENTER)
app_win.scale_grid.attach(label, 2, 0, 1, 1)
app_win.scale_grid.attach(app_win.y_scale_entry, 3, 0, 1, 1)
grid.attach(app_win.scale_grid, 2, 2, 4, 1)
return grid
def parameter_entries_populate():
# set text in entries for parameters with user's input
app_win.y1_entry.set_text(y1)
app_win.y2_entry.set_text(y2)
app_win.y3_entry.set_text(y3)
app_win.x_min_entry.set_text(x_min)
app_win.x_max_entry.set_text(x_max)
app_win.x_scale_entry.set_text(x_scale)
app_win.y_min_entry.set_text(y_min)
app_win.y_max_entry.set_text(y_max)
app_win.y_scale_entry.set_text(y_scale)
def parameter_entries_repopulate():
# set text in entries for parameters
app_win.y1_entry.set_text(y1)
app_win.y2_entry.set_text(y2)
app_win.y3_entry.set_text(y3)
app_win.x_min_entry.set_text(str(graph.x_min))
app_win.x_max_entry.set_text(str(graph.x_max))
app_win.x_scale_entry.set_text(str(graph.x_scale))
app_win.y_min_entry.set_text(str(graph.y_min))
app_win.y_max_entry.set_text(str(graph.y_max))
app_win.y_scale_entry.set_text(str(graph.y_scale))
def key_press_plot(widget, event):
    if event.keyval == 65293:  # GDK keyval for the Return/Enter key
plot(None)
return True
else:
return False
class LybnizApp(Gtk.Application):
def __init__(self):
Gtk.Application.__init__(self, application_id="apps.lybniz", flags=Gio.ApplicationFlags.FLAGS_NONE)
self.connect("activate", self.on_activate)
def on_activate(self, data=None):
global app_win, graph, config
style_provider = Gtk.CssProvider ()
css = """
button, entry, label {
padding-top: 0px;
padding-bottom: 0px;
min-height: 20px;
min-width: 12px;
}
entry {
padding-left: 5px;
padding-right: 5px;
}
toolbar, statusbar {
margin-left: 0px;
margin-top: 0px;
margin-bottom: 0px;
padding-left: 0px;
padding-top: 0px;
padding-bottom: 0px;
min-height: 10px;
}
label.y1 {
color: blue;
}
label.y2 {
color: red;
}
label.y3 {
color: green;
}
"""
        style_provider.load_from_data(css.encode())
        Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), style_provider,
                                                 Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
app_win = Gtk.ApplicationWindow(application=self, title="Lybniz")
app_win.connect("delete-event", quit_dlg)
app_win.set_icon(lybniz_icon)
if config.read([configFile, ]) == []:
config.add_section("MainWindow")
app_win.set_default_size(800, 600)
if config.has_option("MainWindow", "width"):
app_win.resize(config.getint("MainWindow", "width"), config.getint("MainWindow", "height"))
if config.has_option("MainWindow", "x"):
app_win.move(config.getint("MainWindow", "x"), config.getint("MainWindow", "y"))
else:
app_win.set_position(Gtk.WindowPosition.CENTER)
app_win.accel_group = Gtk.AccelGroup()
app_win.add_accel_group(app_win.accel_group)
app_win.v_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
#app_win.v_box.set_border_width(1)
menu_toolbar_create()
app_win.v_box.pack_start(app_win.menu_main, False, False, 0)
app_win.v_box.pack_start(app_win.tool_bar, False, False, 0)
app_win.v_box.pack_start(parameter_entries_create(), False, False, 4)
graph = GraphClass()
app_win.v_box.pack_start(graph.drawing_area, True, True, 0)
app_win.status_bar = Gtk.Statusbar()
app_win.status_bar.set_margin_top(0)
app_win.status_bar.set_margin_bottom(0)
app_win.v_box.pack_start(app_win.status_bar, False, False, 0)
app_win.add(app_win.v_box)
app_win.show_all()
app_win.scale_grid.hide()
self.add_window(app_win)
lybniz_icon = GdkPixbuf.Pixbuf.new_from_xpm_data((
"64 64 231 2",
" c None",
"! c black",
"# c #393D39",
"$ c #364336",
"% c #384336",
"& c #364436",
"' c #237A21",
"( c #1A9218",
") c #334D32",
"* c #374237",
"+ c #383D38",
", c #384137",
"- c #199417",
". c #04CE00",
"0 c #04CF00",
"1 c #247623",
"2 c #364236",
"3 c #384237",
"4 c #2E5A2D",
"5 c #0BBC07",
"6 c #06CA02",
"7 c #324E32",
"8 c #3A3D3A",
"9 c #394039",
": c #384138",
"; c #393C39",
"< c #384038",
"= c #344A34",
"> c #3A3A3A",
"? c #383F38",
"@ c #1D891B",
"A c #03C200",
"B c #027E00",
"C c #0A5509",
"D c #027900",
"E c #03BD00",
"F c #12A80F",
"G c #227C20",
"H c #0CBA08",
"I c #05CB01",
"J c #179C14",
"K c #2F592F",
"L c #383B38",
"M c #169E13",
"N c #0B4A0A",
"O c #3E463E",
"P c #8F8F8F",
"Q c #484F48",
"R c #0C370C",
"S c #05CD01",
"T c #1F861D",
"U c #07C604",
"V c #0FAF0C",
"W c #374336",
"X c #11AA0E",
"Y c #03CC00",
"Z c #0B220B",
"[ c #929292",
"] c #EDEDED",
"^ c #A6A6A6",
"_ c #091509",
"` c #03C500",
"a c #257324",
"b c #383A38",
"c c #374337",
"d c #315330",
"e c #0ABD07",
"f c #0CB909",
"g c #364735",
"h c #393E39",
"i c #374137",
"j c #13A510",
"k c #03A300",
"l c #0F3A0F",
"m c #070D07",
"n c #103510",
"o c #039701",
"p c #13A610",
"q c #384236",
"r c #257424",
"s c #05CB02",
"t c #344B33",
"u c #315530",
"v c #131313",
"w c #189816",
"x c #2B6728",
"y c #12A90F",
"z c #286E26",
"{ c #363E36",
"| c #14A311",
"} c #1C8C1A",
"~ c #374236",
" ! c #384633",
"!! c #247B1F",
"#! c #364536",
"$! c #2F572F",
"%! c #FF0000",
"&! c #D60C0C",
"'! c #7D2824",
"(! c #3D3F34",
")! c #364435",
"*! c #237922",
"+! c #11AC0E",
",! c #393D38",
"-! c #169D14",
".! c #2A6629",
"0! c #090909",
"1! c #1C1E1C",
"2! c #313B31",
"3! c #1D861B",
"4! c #2E4E2D",
"5! c #0E0E0E",
"6! c #000200",
"7! c #000300",
"8! c #000400",
"9! c #010101",
":! c #111111",
";! c #293529",
"<! c #314931",
"=! c #0EB40A",
">! c #247122",
"?! c #1F221F",
"@! c #060606",
"A! c #353D35",
"B! c #227920",
"C! c #1C1D1C",
"D! c #161716",
"E! c #344234",
"F! c #0BBD07",
"G! c #0BBB08",
"H! c #353C35",
"I! c #030303",
"J! c #212721",
"K! c #1E851C",
"L! c #15A012",
"M! c #2A352A",
"N! c #1A1C1A",
"O! c #314831",
"P! c #08C404",
"Q! c #276326",
"R! c #191B19",
"S! c #0D0D0D",
"T! c #1A1A1A",
"U! c #202020",
"V! c #334832",
"W! c #08C305",
"X! c #373C37",
"Y! c #181818",
"Z! c #191919",
"[! c #020302",
"]! c #010601",
"^! c #000900",
"_! c #000800",
"`! c #000600",
"a! c #010201",
"b! c #121212",
"c! c #2A2E2A",
"d! c #2A5F29",
"e! c #0FB20B",
"f! c #343C34",
"g! c #171717",
"h! c #303030",
"i! c #2F2F2F",
"j! c #2E2E2E",
"k! c #364136",
"l! c #0EB50A",
"m! c #344833",
"n! c #313131",
"o! c #2D2D2D",
"p! c #2C2C2C",
"q! c #2A6229",
"r! c #09C105",
"s! c #354435",
"t! c #333333",
"u! c #2B2B2B",
"v! c #354035",
"w! c #179915",
"x! c #30512F",
"y! c #1E861C",
"z! c #267025",
"{! c #30572F",
"|! c #30552F",
"}! c #373F37",
"~! c #325031",
" # c #374037",
"!# c #324F32",
"## c #266F25",
"$# c #2B632A",
"%# c #0FB10C",
"&# c #0ABF06",
"'# c #1C8E19",
"(# c #384238",
")# c #179A15",
"*# c #1B9118",
"+# c #2E5C2D",
",# c #286A27",
"-# c #3B3B3B",
".# c #2E5B2D",
"0# c #2B6529",
"1# c #374437",
"2# c #07C603",
"3# c #314C30",
"4# c #374537",
"5# c #1A9317",
"6# c #2A322A",
"7# c #325231",
"8# c #296927",
"9# c #21821E",
":# c #11AB0E",
";# c #169C14",
"<# c #354535",
"=# c #393B39",
"># c #2A6828",
"?# c #08C504",
"@# c #0FB00C",
"A# c #374436",
"B# c #383E38",
"C# c #0AC006",
"D# c #276E26",
"E# c #1F831D",
"F# c #06C903",
"G# c #1F851D",
"H# c #05CC01",
"I# c #2B652A",
"J# c #2F582E",
"K# c #296728",
"L# c #393939",
"M# c #30562F",
"N# c #07C803",
"O# c #199616",
"P# c #354834",
" ! ! ! ! ",
" ! ! ! ! ",
" ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" # $ ! ! ! ! ! ",
" % & ' ( ) * ! ! ! ! ! ",
" + , - . 0 0 . 1 2 ! ! ! ! ! ",
" 3 4 5 0 0 0 0 0 6 7 8 ! ! ! ! ! 9 : ; < = * $ > ",
" ? @ . 0 A B C D E 0 F & ! ! ! ! ! : , G H I 0 0 I J K ? L ",
" < M 0 0 0 N O P Q R 0 S * ! ! ! ! ! + * T U 0 0 0 0 0 0 0 0 V W ; ",
" * X 0 0 0 Y Z [ ] ^ _ ` 0 a b ! ! ! ! ! c d e 0 0 0 0 0 0 0 0 0 0 0 f g h ",
" i j 0 0 0 0 0 k l m n o 0 0 p & ! ! ! ! ! q r s 0 0 0 0 0 0 0 0 0 0 0 0 0 e t + ",
" > u 0 0 0 0 0 0 0 0 0 0 0 0 0 S : ! ! ! ! v < w 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 f ? ",
" , x y . 0 0 0 0 0 0 0 0 0 0 0 z > ! ! ! ! { | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 } * ",
" ; ~ !!!V 0 0 0 0 0 0 0 0 0 | #! ! ! ! ! p 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 . $!; ",
" %!%!%!&!'!(!)!*!+!. 0 0 0 0 0 6 ,! ! ! ! ! 0 0 0 0 0 0 0 0 0 6 -!.!( S 0 0 0 0 0 0 0 f ? ",
" ! ! ! ! ! ! ! ! 0!1!2!3!0 0 0 0 0 0 4!5!! ! ! ! ! ! ! ! ! ! ! ! ! 6!7!7!8!8!8!8!8!8!9!9!:!;!<!=!0 0 0 0 0 0 0 >!?!! ! ! ! ! ",
" ! ! ! ! ! ! ! ! ! ! @!A!e 0 0 0 0 0 B!C!! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! D!E!F!0 0 0 0 0 0 G!H!I!! ! ! ! ",
" ! ! ! ! ! ! ! ! ! ! ! J!K!0 0 0 0 0 L!M!! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! N!O!P!0 0 0 0 0 0 Q!R!! ! ! ! ",
" ! S!T!T!T!T!T!T!T!T!T!U!V!0 0 0 0 0 W!X!Y!Z!Z!5!9!9!9![!]!! ! ! ! ^!_!_!_!_!`!a!9!! ! ! b!Y!Y!Y!c!d!0 0 0 0 0 0 e!f!Y!Y!g!g!h!",
" i!i!i!i!i!i!j!j!j!h!k!l!0 0 0 0 0 m!n!j!j!o!o!p!f!q!S ! ! ! ! 0 0 0 0 r!s!t!p!p!p!p!p!p!u!u!u!v!w!0 0 0 0 0 0 x!h!u!u!u!i!",
" : y!0 0 0 0 0 z!> < {!6 0 ! ! ! ! 0 0 0 s |!< i r!0 0 0 0 0 K!}! ",
" ~!0 0 0 0 0 | $ #!#P!0 0 ! ! ! ! 0 0 0 ##k! < $#0 0 0 0 0 %#$ ",
" ? � 0 0 0 S & i !#W!0 0 0 ! ! ! ! 0 0 '#(# i )#0 0 0 0 . i ",
" $ *#0 0 0 0 0 - ? 3 +#W!0 0 0 0 ! ! ! ! 0 F : * r!0 0 0 0 ,#-# ",
" .#0 0 0 0 0 0 F F 0 0 0 0 0 0 ! ! ! ! G!* : 0#0 0 0 0 )#1# ",
" ? 2#0 0 0 0 0 0 0 0 0 0 0 0 0 ! ! ! ! 3#+ (#J 0 0 0 W!h ",
" 4#5#0 0 0 0 0 0 0 0 0 0 0 0 0 ! ! ! ! 6# * W!0 0 0 7# ",
" !#0 0 0 0 0 0 0 0 0 0 0 0 0 ! ! ! ! 9! : 8#0 0 0 9#h ",
" (#:#0 0 0 0 0 0 0 0 0 0 0 0 ! ! ! ! ! , L!0 0 ;#<# ",
" =#>#0 0 0 0 0 0 0 0 0 0 0 0 ! ! ! ! ! & ?#0 @#A# ",
" B#C#0 0 0 0 0 0 0 0 0 0 e ! ! ! ! ! (#D#0 W!? ",
" i E#0 0 0 0 0 0 0 0 0 F#~!! ! ! ! ! , G#W!? ",
" A#H#0 0 0 0 0 0 0 . I#* ! ! ! ! ! i J#i ",
" * } 0 0 0 0 0 0 6 K#* ! ! ! ! ! L# ",
" < X 0 0 0 0 W!M#* ! ! ! ! ! ",
" A#L!S N#O#P# # ! ! ! ! ! ",
" L#$ #!? 1# ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! ! ! ! ",
" ! ! "))
if __name__ == "__main__":
app = LybnizApp()
app.run(sys.argv)
|
"""Make plots of monthly values or differences"""
from __future__ import print_function
import calendar
from pandas.io.sql import read_sql
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn
PGCONN = get_dbconn("idep")
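# Unit conversions used in the query below: runoff is stored in mm, so dividing
# by 25.4 yields inches; delivery and detachment appear to be in kg/m^2, and the
# factor 4.463 converts kg/m^2 to short tons per acre (an assumption based on
# the *_ta column aliases).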
def get_scenario(scenario):
df = read_sql(
"""
WITH yearly as (
SELECT huc_12, generate_series(2008, 2016) as yr
from huc12 where states = 'IA' and scenario = 0),
combos as (
SELECT huc_12, yr, generate_series(1, 12) as mo from yearly),
results as (
SELECT r.huc_12, extract(year from valid)::int as yr,
extract(month from valid)::int as mo,
sum(qc_precip) as precip, sum(avg_runoff) as runoff,
sum(avg_delivery) as delivery,
sum(avg_loss) as detachment from results_by_huc12 r
WHERE r.scenario = %s and r.valid >= '2008-01-01'
and r.valid < '2017-01-01' GROUP by r.huc_12, yr, mo),
agg as (
SELECT c.huc_12, c.yr, c.mo, coalesce(r.precip, 0) as precip,
coalesce(r.runoff, 0) as runoff,
coalesce(r.delivery, 0) as delivery,
coalesce(r.detachment, 0) as detachment
from combos c LEFT JOIN results r on (c.huc_12 = r.huc_12 and
c.yr = r.yr and c.mo = r.mo))
select mo,
avg(runoff) / 25.4 as runoff_in,
avg(delivery) * 4.463 as delivery_ta,
avg(detachment) * 4.463 as detachment_ta
from agg GROUP by mo ORDER by mo ASC
""",
PGCONN,
params=(scenario,),
index_col="mo",
)
return df
def main():
"""Go Main"""
adf = get_scenario(0)
b25 = get_scenario(25)
b26 = get_scenario(26)
delta25 = b25 - adf
delta26 = b26 - adf
(fig, ax) = plt.subplots(1, 1)
ax.bar(
delta25.index.values - 0.2,
delta25["delivery_ta"].values,
width=0.4,
label="HI 0.8",
)
ax.bar(
delta26.index.values + 0.2,
delta26["delivery_ta"].values,
width=0.4,
label="HI 0.9",
)
ax.legend(loc="best")
ax.grid(True)
ax.set_title("2008-2016 Change in Delivery vs DEP Baseline")
ax.set_ylabel("Change [tons/acre]")
ax.set_xticks(range(1, 13))
ax.set_xticklabels(calendar.month_abbr[1:])
fig.savefig("test.png")
if __name__ == "__main__":
main()
|
import os
import numpy as np
from keras import backend as K
def remove_files(files):
"""
Remove files from disk
args: files (str or list) remove all files in 'files'
"""
if isinstance(files, (list, tuple)):
for f in files:
if os.path.isfile(os.path.expanduser(f)):
os.remove(f)
elif isinstance(files, str):
if os.path.isfile(os.path.expanduser(files)):
os.remove(files)
def create_dir(dirs):
"""
Create directory
args: dirs (str or list) create all dirs in 'dirs'
"""
if isinstance(dirs, (list, tuple)):
for d in dirs:
if not os.path.exists(os.path.expanduser(d)):
os.makedirs(d)
elif isinstance(dirs, str):
if not os.path.exists(os.path.expanduser(dirs)):
os.makedirs(dirs)
def setup_logging(model_name):
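    """
    Create the output directories for an experiment (model weights and figures)
    args: model_name (str) name of the subdirectory to create under ../../models
    """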
model_dir = "../../models"
# Output path where we store experiment log and weights
model_dir = os.path.join(model_dir, model_name)
fig_dir = "../../figures"
# Create if it does not exist
create_dir([model_dir, fig_dir])
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
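    # The constants below are the per-channel ImageNet means (in BGR order, as
    # used by Caffe/VGG-style models); subtracting them zero-centers the input.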
if dim_ordering == 'th':
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
# 'RGB'->'BGR'
x = x[:, ::-1, :, :]
else:
x[:, :, :, 0] -= 103.939
x[:, :, :, 1] -= 116.779
x[:, :, :, 2] -= 123.68
# 'RGB'->'BGR'
x = x[:, :, :, ::-1]
return x
def deprocess_image(x, img_nrows, img_ncols):
if K.image_dim_ordering() == 'th':
x = x.reshape((3, img_nrows, img_ncols))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_nrows, img_ncols, 3))
# Remove zero-center by mean pixel
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
def color_correction(x, img_nrows, img_ncols, X_source):
# save current generated image
img = deprocess_image(x.copy(), img_nrows, img_ncols).astype(np.float64)
X_sourceT = X_source[0].copy().transpose(1,2,0).astype(np.float64)
# Color correction
for k in range(3):
mean, std = np.mean(X_sourceT[:, :, k]), np.std(X_sourceT[:, :, k])
img[:, :, k] *= std / np.std(img[:, :, k])
img[:, :, k] += mean - np.mean(img[:, :, k])
img = img.clip(0, 255)
return img
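# A minimal round-trip sketch (illustrative only: a random dummy image and
# TensorFlow-style 'tf' dim ordering are assumed):
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, size=(1, 32, 32, 3)).astype("float32")
    x = preprocess_input(dummy.copy(), dim_ordering="tf")
    img = deprocess_image(x.copy(), 32, 32)
    print(img.shape, img.dtype)  # (32, 32, 3) uint8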
|
"""
Averaging Matrices
===================
Averaging matrices are used when a discrete variable living on some part of
the mesh (e.g. nodes, centers, edges or faces) must be approximated at other
locations. Averaging matrices are sparse and exist for 1D, 2D and
3D meshes. For each mesh class (*Tensor mesh*, *Tree mesh*,
*Curvilinear mesh*), the averaging matrices are properties that are
only constructed when called.
Here we discuss:
    - How to construct and apply averaging matrices
    - Averaging matrices in 1D, 2D and 3D
- Averaging discontinuous functions
- The transpose of an averaging matrix
"""
###############################################
#
# Import Packages
# ---------------
#
# Here we import the packages required for this tutorial.
#
from discretize import TensorMesh
import matplotlib.pyplot as plt
import numpy as np
# sphinx_gallery_thumbnail_number = 3
#############################################
# 1D Example
# ----------
#
# Here we compute a scalar function on cell nodes and average to cell centers.
# We then compute the scalar function at cell centers to validate the
# averaging operator.
#
# Create a uniform grid
h = 10 * np.ones(20)
mesh = TensorMesh([h], "C")
# Get node and cell center locations
x_nodes = mesh.vectorNx
x_centers = mesh.vectorCCx
# Define a continuous function
def fun(x):
return np.exp(-(x ** 2) / 50 ** 2)
# Compute function on nodes and cell centers
v_nodes = fun(x_nodes)
v_centers = fun(x_centers)
# Create operator and average from nodes to cell centers
A = mesh.aveN2CC
v_approx = A * v_nodes
# Compare
fig = plt.figure(figsize=(12, 4))
ax1 = fig.add_axes([0.03, 0.01, 0.3, 0.91])
ax1.spy(A, markersize=5)
ax1.set_title("Sparse representation of A", pad=10)
ax2 = fig.add_axes([0.4, 0.06, 0.55, 0.85])
ax2.plot(
x_centers,
v_centers,
"b-",
x_centers,
v_approx,
"ko",
x_centers,
    np.c_[np.abs(v_centers - v_approx)],
"r-",
)
ax2.set_title("Comparison plot")
ax2.legend(("evaluated at centers", "averaged from nodes", "absolute error"))
fig.show()
#############################################
# 1D, 2D and 3D Averaging
# -----------------------
#
# Here we discuss averaging operators in 1D, 2D and 3D. In 1D we can
# average between nodes and cell centers. In higher dimensions, we may need to
# average between nodes, cell centers, faces and edges. For this example we
# describe the averaging operator from faces to cell centers in 1D, 2D and 3D.
#
# Construct uniform meshes in 1D, 2D and 3D
h = 10 * np.ones(10)
mesh1D = TensorMesh([h], x0="C")
mesh2D = TensorMesh([h, h], x0="CC")
mesh3D = TensorMesh([h, h, h], x0="CCC")
# Create averaging operators
A1 = mesh1D.aveF2CC # Averages faces (nodes in 1D) to centers
A2 = mesh2D.aveF2CC # Averages from x and y faces to centers
A3 = mesh3D.aveF2CC # Averages from x, y and z faces to centers
# Plot sparse representation
fig = plt.figure(figsize=(7, 8))
ax1 = fig.add_axes([0.37, 0.72, 0.2, 0.2])
ax1.spy(A1, markersize=2.5)
ax1.set_title("Faces to centers in 1D", pad=17)
ax2 = fig.add_axes([0.17, 0.42, 0.6, 0.22])
ax2.spy(A2, markersize=1)
ax2.set_title("Faces to centers in 2D", pad=17)
ax3 = fig.add_axes([0.05, 0, 0.93, 0.4])
ax3.spy(A3, markersize=0.5)
ax3.set_title("Faces to centers in 3D", pad=17)
fig.show()
# Print some properties
print("\n For 1D mesh:")
print("- Number of cells:", str(mesh1D.nC))
print("- Number of faces:", str(mesh1D.nF))
print("- Dimensions of operator:", str(mesh1D.nC), "x", str(mesh1D.nF))
print("- Number of non-zero elements:", str(A1.nnz), "\n")
print("For 2D mesh:")
print("- Number of cells:", str(mesh2D.nC))
print("- Number of faces:", str(mesh2D.nF))
print("- Dimensions of operator:", str(mesh2D.nC), "x", str(mesh2D.nF))
print("- Number of non-zero elements:", str(A2.nnz), "\n")
print("For 3D mesh:")
print("- Number of cells:", str(mesh3D.nC))
print("- Number of faces:", str(mesh3D.nF))
print("- Dimensions of operator:", str(mesh3D.nC), "x", str(mesh3D.nF))
print("- Number of non-zero elements:", str(A3.nnz))
######################################################
# Discontinuous Functions and the Transpose
# -----------------------------------------
#
# Here we show the effects of applying averaging operators to discontinuous
# functions. We will see that averaging smears the function at
# discontinuities.
#
# The transpose of an averaging operator is also an
# averaging operator. For example, we can average from cell centers to faces
# by taking the transpose of operator that averages from faces to cell centers.
# Note that values on the boundaries are not accurate when applying the
# transpose as an averaging operator. This is also true for staggered grids.
#
# Create mesh and obtain averaging operators
h = 2 * np.ones(50)
mesh = TensorMesh([h, h], x0="CC")
A2 = mesh.aveCC2F # cell centers to faces
A3 = mesh.aveN2CC # nodes to cell centers
A4 = mesh.aveF2CC # faces to cell centers
# Create a variable on cell centers
v = 100.0 * np.ones(mesh.nC)
xy = mesh.gridCC
v[(xy[:, 1] > 0)] = 0.0
v[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
fig = plt.figure(figsize=(10, 10))
ax1 = fig.add_subplot(221)
mesh.plotImage(v, ax=ax1)
ax1.set_title("Variable at cell centers")
# Apply cell centers to faces averaging
ax2 = fig.add_subplot(222)
mesh.plotImage(A2 * v, ax=ax2, v_type="F")
ax2.set_title("Cell centers to faces")
# Use the transpose to go from cell centers to nodes
ax3 = fig.add_subplot(223)
mesh.plotImage(A3.T * v, ax=ax3, v_type="N")
ax3.set_title("Cell centers to nodes using transpose")
# Use the transpose to go from cell centers to faces
ax4 = fig.add_subplot(224)
mesh.plotImage(A4.T * v, ax=ax4, v_type="F")
ax4.set_title("Cell centers to faces using transpose")
fig.show()
|
""" Classes and functions for generalized q-sampling """
import numpy as np
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
import warnings
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class GeneralizedQSamplingModel(OdfModel, Cache):
def __init__(self,
gtab,
method='gqi2',
sampling_length=1.2,
normalize_peaks=False):
r""" Generalized Q-Sampling Imaging [1]_
This model has the same assumptions as the DSI method i.e. Cartesian
grid sampling in q-space and fast gradient switching.
        Implements equation 2.14 from [2]_ for standard GQI and equation 2.16
from [2]_ for GQI2. You can think of GQI2 as an analytical solution of
the DSI ODF.
Parameters
----------
gtab : object,
GradientTable
method : str,
'standard' or 'gqi2'
sampling_length : float,
diffusion sampling length (lambda in eq. 2.14 and 2.16)
References
----------
.. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging", IEEE TMI, 2010
.. [2] Garyfallidis E, "Towards an accurate brain tractography", PhD
thesis, University of Cambridge, 2012.
Notes
-----
        As of version 0.9, the range of the sampling length in GQI2 has changed
to match the same scale used in the 'standard' method [1]_. This
means that the value of `sampling_length` should be approximately
1 - 1.3 (see [1]_, pg. 1628).
Examples
--------
Here we create an example where we provide the data, a gradient table
and a reconstruction sphere and calculate the ODF for the first
voxel in the data.
>>> from dipy.data import dsi_voxels
>>> data, gtab = dsi_voxels()
>>> from dipy.core.subdivide_octahedron import create_unit_sphere
>>> sphere = create_unit_sphere(5)
>>> from dipy.reconst.gqi import GeneralizedQSamplingModel
>>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)
>>> voxel_signal = data[0, 0, 0]
>>> odf = gq.fit(voxel_signal).odf(sphere)
See Also
--------
dipy.reconst.dsi.DiffusionSpectrumModel
"""
OdfModel.__init__(self, gtab)
self.method = method
self.Lambda = sampling_length
self.normalize_peaks = normalize_peaks
        # 0.01506 = 6 * D where D is the free water diffusion coefficient
        # l_values = sqrt(6 * D * tau), where D is the free water diffusion
        # coefficient and tau is included in the b-value
scaling = np.sqrt(self.gtab.bvals * 0.01506)
tmp = np.tile(scaling, (3, 1))
gradsT = self.gtab.bvecs.T
b_vector = gradsT * tmp # element-wise product
self.b_vector = b_vector.T
@multi_voxel_fit
def fit(self, data):
return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):
def __init__(self, model, data):
""" Calculates PDF and ODF for a single voxel
Parameters
----------
        model : object,
            GeneralizedQSamplingModel
data : 1d ndarray,
signal values
"""
OdfFit.__init__(self, model, data)
self._gfa = None
self.npeaks = 5
self._peak_values = None
self._peak_indices = None
self._qa = None
def odf(self, sphere):
""" Calculates the discrete ODF for a given discrete sphere.
"""
self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)
if self.gqi_vector is None:
if self.model.method == 'gqi2':
H = squared_radial_component
# print self.gqi_vector.shape
self.gqi_vector = np.real(H(np.dot(
self.model.b_vector, sphere.vertices.T) *
self.model.Lambda))
if self.model.method == 'standard':
self.gqi_vector = np.real(np.sinc(np.dot(
self.model.b_vector, sphere.vertices.T) *
self.model.Lambda / np.pi))
self.model.cache_set('gqi_vector', sphere, self.gqi_vector)
return np.dot(self.data, self.gqi_vector)
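# Note on the two kernels used in `odf` above: since np.sinc(t) = sin(pi*t)/(pi*t),
# the 'standard' branch evaluates sin(Lambda * q.u) / (Lambda * q.u) for each
# sampling direction u (the sinc kernel of eq. 2.14 in [2]_), while the 'gqi2'
# branch applies the squared radial component H(Lambda * q.u) defined below.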
def normalize_qa(qa, max_qa=None):
""" Normalize quantitative anisotropy.
Used mostly with GQI rather than GQI2.
Parameters
----------
qa : array, shape (X, Y, Z, N)
where N is the maximum number of peaks stored
max_qa : float,
        maximum qa value. Usually found in the CSF (cerebrospinal fluid).
Returns
-------
    nqa : array, shape (X, Y, Z, N)
normalized quantitative anisotropy
Notes
-----
    Normalized quantitative anisotropy has the very useful property of
    being very small near gray matter and background areas. Therefore,
    it can be used to mask out white matter areas.
"""
if max_qa is None:
return qa / qa.max()
return qa / max_qa
def squared_radial_component(x, tol=0.01):
""" Part of the GQI2 integral
Eq.8 in the referenced paper by Yeh et al. 2010
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / (x ** 3)
x_near_zero = (x < tol) & (x > -tol)
return np.where(x_near_zero, 1./3, result)
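# For reference, squared_radial_component evaluates
#     H(x) = (2*x*cos(x) + (x**2 - 2)*sin(x)) / x**3,
# which tends to 1/3 as x -> 0; the `tol` window substitutes that limit to
# avoid the 0/0 division near the origin.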
def npa(self, odf, width=5):
""" non-parametric anisotropy
Nimmo-Smith et al. ISMRM 2011
"""
# odf = self.odf(s)
t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
psi0 = t0[1] ** 2
psi1 = t1[1] ** 2
psi2 = t2[1] ** 2
npa = (np.sqrt(
(psi0 - psi1) ** 2 +
(psi1 - psi2) ** 2 +
(psi2 - psi0) ** 2) /
np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2)))
return t0, t1, t2, npa
def equatorial_zone_vertices(vertices, pole, width=5):
"""
    finds the indices of 'vertices' in the equatorial zone conjugate
    to 'pole', within a band of half-width 'width' degrees
"""
return [i
for i, v in enumerate(vertices)
if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi * width / 180))]
def polar_zone_vertices(vertices, pole, width=5):
"""
    finds the indices of 'vertices' in the polar cap around the 'pole'
    of radius 'width' degrees
"""
return [i
for i, v in enumerate(vertices)
if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]
def upper_hemi_map(v):
"""
maps a 3-vector into the z-upper hemisphere
"""
return np.sign(v[2])*v
def equatorial_maximum(vertices, odf, pole, width):
eqvert = equatorial_zone_vertices(vertices, pole, width)
# need to test for whether eqvert is empty or not
if len(eqvert) == 0:
print('empty equatorial band at %s pole with width %f' %
(np.array_str(pole), width))
return None, None
eqvals = [odf[i] for i in eqvert]
eqargmax = np.argmax(eqvals)
eqvertmax = eqvert[eqargmax]
eqvalmax = eqvals[eqargmax]
return eqvertmax, eqvalmax
def patch_vertices(vertices, pole, width):
"""
find 'vertices' within the cone of 'width' degrees around 'pole'
"""
return [i
for i, v in enumerate(vertices)
if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]
def patch_maximum(vertices, odf, pole, width):
eqvert = patch_vertices(vertices, pole, width)
# need to test for whether eqvert is empty or not
if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        return None, None
eqvals = [odf[i] for i in eqvert]
eqargmax = np.argmax(eqvals)
eqvertmax = eqvert[eqargmax]
eqvalmax = eqvals[eqargmax]
return eqvertmax, eqvalmax
def odf_sum(odf):
return np.sum(odf)
def patch_sum(vertices, odf, pole, width):
eqvert = patch_vertices(vertices, pole, width)
# need to test for whether eqvert is empty or not
if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        return None
return np.sum([odf[i] for i in eqvert])
def triple_odf_maxima(vertices, odf, width):
indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])
odfmax1 = odf[indmax1]
pole = vertices[indmax1]
eqvert = equatorial_zone_vertices(vertices, pole, width)
indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)
indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p]))
for p in eqvert])]
odfmax3 = odf[indmax3]
"""
cross12 = np.cross(vertices[indmax1],vertices[indmax2])
cross12 = cross12/np.sqrt(np.sum(cross12**2))
indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)
"""
return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]
|
from django.db import models
from django.db.models import Count
# Create your models here.
class Db(models.Model):
db_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
description = models.CharField(max_length=255, blank=True)
urlprefix = models.CharField(max_length=255, blank=True)
url = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'db'
class DbDbxrefCount(models.Model):
name = models.CharField(max_length=255, blank=True)
num_dbxrefs = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'db_dbxref_count'
class Project(models.Model):
project_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
description = models.CharField(max_length=255)
class Meta:
db_table = u'project'
class Dbxref(models.Model):
dbxref_id = models.IntegerField(primary_key=True)
db = models.ForeignKey("Db", related_name="%(class)s_db", on_delete=models.PROTECT)
accession = models.CharField(max_length=255)
version = models.CharField(max_length=255)
description = models.TextField(blank=True)
class Meta:
db_table = u'dbxref'
class Cv(models.Model):
cv_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
definition = models.TextField(blank=True)
class Meta:
db_table = u'cv'
class Cvtermsynonym(models.Model):
cvtermsynonym_id = models.IntegerField(primary_key=True)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
synonym = models.CharField(max_length=1024)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
db_table = u'cvtermsynonym'
class CvtermRelationship(models.Model):
cvterm_relationship_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
subject = models.ForeignKey("Cvterm", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Cvterm", related_name="%(class)s_object", on_delete=models.PROTECT)
class Meta:
db_table = u'cvterm_relationship'
class CvtermDbxref(models.Model):
cvterm_dbxref_id = models.IntegerField(primary_key=True)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_for_definition = models.IntegerField()
class Meta:
db_table = u'cvterm_dbxref'
class Cvtermpath(models.Model):
cvtermpath_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
subject = models.ForeignKey("Cvterm", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Cvterm", related_name="%(class)s_object", on_delete=models.PROTECT)
cv = models.ForeignKey("Cv", related_name="%(class)s_cv", on_delete=models.PROTECT)
pathdistance = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'cvtermpath'
class CvRoot(models.Model):
cv_id = models.IntegerField(null=True, blank=True)
root_cvterm_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_root'
class Cvtermprop(models.Model):
cvtermprop_id = models.IntegerField(primary_key=True)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField()
rank = models.IntegerField()
class Meta:
db_table = u'cvtermprop'
class CvLeaf(models.Model):
cv_id = models.IntegerField(null=True, blank=True)
cvterm_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_leaf'
class Dbxrefprop(models.Model):
dbxrefprop_id = models.IntegerField(primary_key=True)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField()
rank = models.IntegerField()
class Meta:
db_table = u'dbxrefprop'
class CommonAncestorCvterm(models.Model):
cvterm1_id = models.IntegerField(null=True, blank=True)
cvterm2_id = models.IntegerField(null=True, blank=True)
ancestor_cvterm_id = models.IntegerField(null=True, blank=True)
pathdistance1 = models.IntegerField(null=True, blank=True)
pathdistance2 = models.IntegerField(null=True, blank=True)
total_pathdistance = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'common_ancestor_cvterm'
class CommonDescendantCvterm(models.Model):
cvterm1_id = models.IntegerField(null=True, blank=True)
cvterm2_id = models.IntegerField(null=True, blank=True)
ancestor_cvterm_id = models.IntegerField(null=True, blank=True)
pathdistance1 = models.IntegerField(null=True, blank=True)
pathdistance2 = models.IntegerField(null=True, blank=True)
total_pathdistance = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'common_descendant_cvterm'
class StatsPathsToRoot(models.Model):
cvterm_id = models.IntegerField(null=True, blank=True)
total_paths = models.BigIntegerField(null=True, blank=True)
avg_distance = models.DecimalField(null=True, max_digits=65535, decimal_places=65535, blank=True)
min_distance = models.IntegerField(null=True, blank=True)
max_distance = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'stats_paths_to_root'
class CvCvtermCount(models.Model):
name = models.CharField(max_length=255, blank=True)
num_terms_excl_obs = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_cvterm_count'
class CvCvtermCountWithObs(models.Model):
name = models.CharField(max_length=255, blank=True)
num_terms_incl_obs = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_cvterm_count_with_obs'
class CvLinkCount(models.Model):
cv_name = models.CharField(max_length=255, blank=True)
relation_name = models.CharField(max_length=1024, blank=True)
relation_cv_name = models.CharField(max_length=255, blank=True)
num_links = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_link_count'
class CvPathCount(models.Model):
cv_name = models.CharField(max_length=255, blank=True)
relation_name = models.CharField(max_length=1024, blank=True)
relation_cv_name = models.CharField(max_length=255, blank=True)
num_paths = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'cv_path_count'
class PubDbxref(models.Model):
pub_dbxref_id = models.IntegerField(primary_key=True)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_current = models.BooleanField()
class Meta:
db_table = u'pub_dbxref'
class PubRelationship(models.Model):
pub_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Pub", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Pub", related_name="%(class)s_object", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
class Meta:
db_table = u'pub_relationship'
class Pubauthor(models.Model):
pubauthor_id = models.IntegerField(primary_key=True)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
rank = models.IntegerField()
editor = models.NullBooleanField(null=True, blank=True)
surname = models.CharField(max_length=100)
givennames = models.CharField(max_length=100, blank=True)
suffix = models.CharField(max_length=100, blank=True)
class Meta:
db_table = u'pubauthor'
class Pubprop(models.Model):
pubprop_id = models.IntegerField(primary_key=True)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField()
rank = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'pubprop'
class OrganismDbxref(models.Model):
organism_dbxref_id = models.IntegerField(primary_key=True)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
class Meta:
db_table = u'organism_dbxref'
class Organismprop(models.Model):
organismprop_id = models.IntegerField(primary_key=True)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'organismprop'
class Featureloc(models.Model):
featureloc_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
srcfeature = models.ForeignKey("Feature", null=True, blank=True, on_delete=models.PROTECT)
fmin = models.IntegerField(null=True, blank=True)
is_fmin_partial = models.BooleanField()
fmax = models.IntegerField(null=True, blank=True)
is_fmax_partial = models.BooleanField()
strand = models.SmallIntegerField(null=True, blank=True)
phase = models.IntegerField(null=True, blank=True)
residue_info = models.TextField(blank=True)
locgroup = models.IntegerField()
rank = models.IntegerField()
class Meta:
db_table = u'featureloc'
class Organism(models.Model):
organism_id = models.IntegerField(primary_key=True)
abbreviation = models.CharField(max_length=255, blank=True)
genus = models.CharField(max_length=255)
species = models.CharField(max_length=255)
common_name = models.CharField(max_length=255, blank=True)
comment = models.TextField(blank=True)
class Meta:
db_table = u'organism'
# a method that counts the number of features an organism has
def count_features(self):
return Feature.objects.filter(organism=self).count()
# a method that counts how many of each type of feature an organism has
def count_feature_types(self):
        return (Feature.objects.filter(organism=self)
                .values('type__name')
                .annotate(count=Count('type'))
                .extra(select={'name': 'type__name'}))
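    # Example usage (hypothetical object name): for an Organism instance `org`,
    #   org.count_features()       -> total number of Feature rows for `org`
    #   org.count_feature_types()  -> per-type feature counts for `org`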
class FeaturelocPub(models.Model):
featureloc_pub_id = models.IntegerField(primary_key=True)
featureloc = models.ForeignKey("Featureloc", related_name="%(class)s_featureloc", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'featureloc_pub'
class FeaturePub(models.Model):
feature_pub_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_pub'
class FeaturePubprop(models.Model):
feature_pubprop_id = models.IntegerField(primary_key=True)
feature_pub = models.ForeignKey("FeaturePub", related_name="%(class)s_pub", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'feature_pubprop'
class FeatureDbxref(models.Model):
feature_dbxref_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_current = models.BooleanField()
class Meta:
db_table = u'feature_dbxref'
class Featureprop(models.Model):
featureprop_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'featureprop'
class FeaturepropPub(models.Model):
featureprop_pub_id = models.IntegerField(primary_key=True)
featureprop = models.ForeignKey("Featureprop", related_name="%(class)s_featureprop", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'featureprop_pub'
class FeatureRelationship(models.Model):
feature_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Feature", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Feature", related_name="%(class)s_object", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'feature_relationship'
class FeatureRelationshipPub(models.Model):
feature_relationship_pub_id = models.IntegerField(primary_key=True)
feature_relationship = models.ForeignKey("FeatureRelationship", related_name="%(class)s_relationship", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_relationship_pub'
class FeatureRelationshipprop(models.Model):
feature_relationshipprop_id = models.IntegerField(primary_key=True)
feature_relationship = models.ForeignKey("FeatureRelationship", related_name="%(class)s_relationship", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'feature_relationshipprop'
class FeatureRelationshippropPub(models.Model):
feature_relationshipprop_pub_id = models.IntegerField(primary_key=True)
feature_relationshipprop = models.ForeignKey("FeatureRelationshipprop", related_name="%(class)s_relationshipprop", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_relationshipprop_pub'
class FeatureCvtermprop(models.Model):
feature_cvtermprop_id = models.IntegerField(primary_key=True)
feature_cvterm = models.ForeignKey("FeatureCvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'feature_cvtermprop'
class FeatureCvtermDbxref(models.Model):
feature_cvterm_dbxref_id = models.IntegerField(primary_key=True)
feature_cvterm = models.ForeignKey("FeatureCvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_cvterm_dbxref'
class FeatureCvterm(models.Model):
feature_cvterm_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
is_not = models.BooleanField()
rank = models.IntegerField()
class Meta:
db_table = u'feature_cvterm'
class FeatureCvtermPub(models.Model):
feature_cvterm_pub_id = models.IntegerField(primary_key=True)
feature_cvterm = models.ForeignKey("FeatureCvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_cvterm_pub'
class FeatureSynonym(models.Model):
feature_synonym_id = models.IntegerField(primary_key=True)
synonym = models.ForeignKey("Synonym", related_name="%(class)s_synonym", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
is_current = models.BooleanField()
is_internal = models.BooleanField()
class Meta:
db_table = u'feature_synonym'
class TypeFeatureCount(models.Model):
type = models.CharField(max_length=1024, blank=True)
num_features = models.BigIntegerField(null=True, blank=True)
class Meta:
db_table = u'type_feature_count'
class ProteinCodingGene(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
dbxref_id = models.IntegerField(null=True, blank=True)
organism_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
uniquename = models.TextField(blank=True)
residues = models.TextField(blank=True)
seqlen = models.IntegerField(null=True, blank=True)
md5checksum = models.CharField(max_length=32, blank=True)
type_id = models.IntegerField(null=True, blank=True)
is_analysis = models.NullBooleanField(null=True, blank=True)
is_obsolete = models.NullBooleanField(null=True, blank=True)
timeaccessioned = models.DateTimeField(null=True, blank=True)
timelastmodified = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'protein_coding_gene'
class IntronCombinedView(models.Model):
exon1_id = models.IntegerField(null=True, blank=True)
exon2_id = models.IntegerField(null=True, blank=True)
fmin = models.IntegerField(null=True, blank=True)
fmax = models.IntegerField(null=True, blank=True)
strand = models.SmallIntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
intron_rank = models.IntegerField(null=True, blank=True)
transcript_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'intron_combined_view'
class IntronlocView(models.Model):
exon1_id = models.IntegerField(null=True, blank=True)
exon2_id = models.IntegerField(null=True, blank=True)
fmin = models.IntegerField(null=True, blank=True)
fmax = models.IntegerField(null=True, blank=True)
strand = models.SmallIntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'intronloc_view'
class Feature(models.Model):
feature_id = models.IntegerField(primary_key=True)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
name = models.CharField(max_length=255, blank=True)
uniquename = models.TextField()
residues = models.TextField(blank=True)
seqlen = models.IntegerField(null=True, blank=True)
md5checksum = models.CharField(max_length=32, blank=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
is_analysis = models.BooleanField()
is_obsolete = models.BooleanField()
timeaccessioned = models.DateTimeField()
timelastmodified = models.DateTimeField()
class Meta:
db_table = u'feature'
    # a method that counts the number of featurelocs located on this feature
def count_featurelocs(self):
return Featureloc.objects.filter(srcfeature_id=self).count()
    # a method that returns the featurelocs located on this feature
def get_featurelocs(self):
return Featureloc.objects.filter(srcfeature_id=self)
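    # Example usage (hypothetical object name): for a reference Feature `chrom`
    # used as a srcfeature,
    #   chrom.count_featurelocs()  -> number of Featureloc rows located on it
    #   chrom.get_featurelocs()    -> queryset of those Featureloc rows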
class Analysisprop(models.Model):
analysisprop_id = models.IntegerField(primary_key=True)
analysis = models.ForeignKey("Analysis", related_name="%(class)s_analysis", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'analysisprop'
class Analysisfeature(models.Model):
analysisfeature_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
analysis = models.ForeignKey("Analysis", related_name="%(class)s_analysis", on_delete=models.PROTECT)
rawscore = models.FloatField(null=True, blank=True)
normscore = models.FloatField(null=True, blank=True)
significance = models.FloatField(null=True, blank=True)
identity = models.FloatField(null=True, blank=True)
class Meta:
db_table = u'analysisfeature'
class Analysis(models.Model):
analysis_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
program = models.CharField(max_length=255)
programversion = models.CharField(max_length=255)
algorithm = models.CharField(max_length=255, blank=True)
sourcename = models.CharField(max_length=255, blank=True)
sourceversion = models.CharField(max_length=255, blank=True)
sourceuri = models.TextField(blank=True)
timeexecuted = models.DateTimeField()
class Meta:
db_table = u'analysis'
class Analysisfeatureprop(models.Model):
analysisfeatureprop_id = models.IntegerField(primary_key=True)
analysisfeature = models.ForeignKey("Analysisfeature", related_name="%(class)s_analysisfeature", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'analysisfeatureprop'
class Genotype(models.Model):
genotype_id = models.IntegerField(primary_key=True)
name = models.TextField(blank=True)
uniquename = models.TextField(unique=True)
description = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'genotype'
class PhenotypeCvterm(models.Model):
phenotype_cvterm_id = models.IntegerField(primary_key=True)
phenotype = models.ForeignKey("Phenotype", related_name="%(class)s_phenotype", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
rank = models.IntegerField()
class Meta:
db_table = u'phenotype_cvterm'
class FeaturePhenotype(models.Model):
feature_phenotype_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
phenotype = models.ForeignKey("Phenotype", related_name="%(class)s_phenotype", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_phenotype'
class FeatureGenotype(models.Model):
feature_genotype_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
genotype = models.ForeignKey("Genotype", related_name="%(class)s_genotype", on_delete=models.PROTECT)
chromosome = models.ForeignKey("Feature", null=True, blank=True, on_delete=models.PROTECT)
rank = models.IntegerField()
cgroup = models.IntegerField()
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_genotype'
class Environment(models.Model):
environment_id = models.IntegerField(primary_key=True)
uniquename = models.TextField(unique=True)
description = models.TextField(blank=True)
class Meta:
db_table = u'environment'
class EnvironmentCvterm(models.Model):
environment_cvterm_id = models.IntegerField(primary_key=True)
environment = models.ForeignKey("Environment", related_name="%(class)s_environment", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
class Meta:
db_table = u'environment_cvterm'
class Phenstatement(models.Model):
phenstatement_id = models.IntegerField(primary_key=True)
genotype = models.ForeignKey("Genotype", related_name="%(class)s_genotype", on_delete=models.PROTECT)
environment = models.ForeignKey("Environment", related_name="%(class)s_environment", on_delete=models.PROTECT)
phenotype = models.ForeignKey("Phenotype", related_name="%(class)s_phenotype", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'phenstatement'
class PhenotypeComparison(models.Model):
phenotype_comparison_id = models.IntegerField(primary_key=True)
genotype1 = models.ForeignKey("Genotype", related_name="%(class)s_genotype1", on_delete=models.PROTECT)
environment1 = models.ForeignKey("Environment", related_name="%(class)s_environment1", on_delete=models.PROTECT)
genotype2 = models.ForeignKey("Genotype", related_name="%(class)s_genotype2", on_delete=models.PROTECT)
environment2 = models.ForeignKey("Environment", related_name="%(class)s_environment2", on_delete=models.PROTECT)
phenotype1 = models.ForeignKey("Phenotype", related_name="%(class)s_phenotype1", on_delete=models.PROTECT)
phenotype2 = models.ForeignKey("Phenotype", null=True, blank=True, on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
class Meta:
db_table = u'phenotype_comparison'
class Phenotype(models.Model):
phenotype_id = models.IntegerField(primary_key=True)
uniquename = models.TextField(unique=True)
observable = models.ForeignKey("Cvterm", related_name="%(class)s_observable", null=True, blank=True, on_delete=models.PROTECT)
attr = models.ForeignKey("Cvterm", related_name="%(class)s_attr", null=True, blank=True, on_delete=models.PROTECT)
value = models.TextField(blank=True)
cvalue = models.ForeignKey("Cvterm",related_name="%(class)s_cvalue", null=True, blank=True, on_delete=models.PROTECT)
assay = models.ForeignKey("Cvterm",related_name="%(class)s_assay", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
db_table = u'phenotype'
class Phendesc(models.Model):
phendesc_id = models.IntegerField(primary_key=True)
genotype = models.ForeignKey("Genotype", related_name="%(class)s_genotype", on_delete=models.PROTECT)
environment = models.ForeignKey("Environment", related_name="%(class)s_environment", on_delete=models.PROTECT)
description = models.TextField()
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'phendesc'
class SouthMigrationhistory(models.Model):
id = models.IntegerField(primary_key=True)
app_name = models.CharField(max_length=255)
migration = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
db_table = u'south_migrationhistory'
class PhenotypeComparisonCvterm(models.Model):
phenotype_comparison_cvterm_id = models.IntegerField(primary_key=True)
phenotype_comparison = models.ForeignKey("PhenotypeComparison", related_name="%(class)s_comparison", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
rank = models.IntegerField()
class Meta:
db_table = u'phenotype_comparison_cvterm'
class Cvterm(models.Model):
cvterm_id = models.IntegerField(primary_key=True)
cv = models.ForeignKey("Cv", related_name="%(class)s_cv", on_delete=models.PROTECT)
name = models.CharField(max_length=1024)
definition = models.TextField(blank=True)
dbxref = models.ForeignKey("Dbxref", unique=True, on_delete=models.PROTECT)
is_obsolete = models.IntegerField()
is_relationshiptype = models.IntegerField()
class Meta:
db_table = u'cvterm'
class Featuremap(models.Model):
featuremap_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, unique=True, blank=True)
description = models.TextField(blank=True)
unittype = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
db_table = u'featuremap'
class Pub(models.Model):
pub_id = models.IntegerField(primary_key=True)
title = models.TextField(blank=True)
volumetitle = models.TextField(blank=True)
volume = models.CharField(max_length=255, blank=True)
series_name = models.CharField(max_length=255, blank=True)
issue = models.CharField(max_length=255, blank=True)
pyear = models.CharField(max_length=255, blank=True)
pages = models.CharField(max_length=255, blank=True)
miniref = models.CharField(max_length=255, blank=True)
uniquename = models.TextField(unique=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
is_obsolete = models.NullBooleanField(null=True, blank=True)
publisher = models.CharField(max_length=255, blank=True)
pubplace = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'pub'
class Featurerange(models.Model):
featurerange_id = models.IntegerField(primary_key=True)
featuremap = models.ForeignKey("Featuremap", related_name="%(class)s_featuremap", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
leftstartf = models.ForeignKey("Feature", related_name="%(class)s_leftstartf", on_delete=models.PROTECT)
leftendf = models.ForeignKey("Feature", related_name="%(class)s_leftendf", null=True, blank=True, on_delete=models.PROTECT)
rightstartf = models.ForeignKey("Feature", related_name="%(class)s_rightstartf", null=True, blank=True, on_delete=models.PROTECT)
rightendf = models.ForeignKey("Feature", related_name="%(class)s_rightendf", on_delete=models.PROTECT)
rangestr = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'featurerange'
class Featurepos(models.Model):
featurepos_id = models.IntegerField(primary_key=True)
featuremap = models.ForeignKey("Featuremap", related_name="%(class)s_featuremap", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
map_feature = models.ForeignKey("Feature", related_name="%(class)s_map_feature", on_delete=models.PROTECT)
mappos = models.FloatField()
class Meta:
db_table = u'featurepos'
class FeaturemapPub(models.Model):
featuremap_pub_id = models.IntegerField(primary_key=True)
featuremap = models.ForeignKey("Featuremap", related_name="%(class)s_featuremap", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'featuremap_pub'
class PhylotreePub(models.Model):
phylotree_pub_id = models.IntegerField(primary_key=True)
phylotree = models.ForeignKey("Phylotree", related_name="%(class)s_phylotree", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'phylotree_pub'
class PhylonodeDbxref(models.Model):
phylonode_dbxref_id = models.IntegerField(primary_key=True)
phylonode = models.ForeignKey("Phylonode", related_name="%(class)s_phylonode", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
class Meta:
db_table = u'phylonode_dbxref'
class PhylonodePub(models.Model):
phylonode_pub_id = models.IntegerField(primary_key=True)
phylonode = models.ForeignKey("Phylonode", related_name="%(class)s_phylonode", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'phylonode_pub'
class PhylonodeOrganism(models.Model):
phylonode_organism_id = models.IntegerField(primary_key=True)
phylonode = models.ForeignKey("Phylonode", unique=True, on_delete=models.PROTECT)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
class Meta:
db_table = u'phylonode_organism'
class Phylonodeprop(models.Model):
phylonodeprop_id = models.IntegerField(primary_key=True)
phylonode = models.ForeignKey("Phylonode", related_name="%(class)s_phylonode", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField()
rank = models.IntegerField()
class Meta:
db_table = u'phylonodeprop'
class Phylonode(models.Model):
phylonode_id = models.IntegerField(primary_key=True)
phylotree = models.ForeignKey("Phylotree", related_name="%(class)s_phylotree", on_delete=models.PROTECT)
parent_phylonode = models.ForeignKey('self', null=True, blank=True, on_delete=models.PROTECT)
left_idx = models.IntegerField()
right_idx = models.IntegerField()
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", null=True, blank=True, on_delete=models.PROTECT)
label = models.CharField(max_length=255, blank=True)
distance = models.FloatField(null=True, blank=True)
class Meta:
db_table = u'phylonode'
class Phylotree(models.Model):
phylotree_id = models.IntegerField(primary_key=True)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
name = models.CharField(max_length=255, blank=True)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
analysis = models.ForeignKey("Analysis", null=True, blank=True, on_delete=models.PROTECT)
comment = models.TextField(blank=True)
class Meta:
db_table = u'phylotree'
class PhylonodeRelationship(models.Model):
phylonode_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("phylonode", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("phylonode", related_name="%(class)s_object", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
rank = models.IntegerField(null=True, blank=True)
phylotree = models.ForeignKey("phylotree", related_name="%(class)s_phylotree", on_delete=models.PROTECT)
class Meta:
db_table = u'phylonode_relationship'
class Contact(models.Model):
contact_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
name = models.CharField(max_length=255, unique=True)
description = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'contact'
class ContactRelationship(models.Model):
contact_relationship_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
subject = models.ForeignKey("Contact", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Contact", related_name="%(class)s_object", on_delete=models.PROTECT)
class Meta:
db_table = u'contact_relationship'
class ExpressionPub(models.Model):
expression_pub_id = models.IntegerField(primary_key=True)
expression = models.ForeignKey("Expression", related_name="%(class)s_expression", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'expression_pub'
class ExpressionCvterm(models.Model):
expression_cvterm_id = models.IntegerField(primary_key=True)
expression = models.ForeignKey("Expression", related_name="%(class)s_expression", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
rank = models.IntegerField()
cvterm_type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
class Meta:
db_table = u'expression_cvterm'
class ExpressionCvtermprop(models.Model):
expression_cvtermprop_id = models.IntegerField(primary_key=True)
expression_cvterm = models.ForeignKey("ExpressionCvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'expression_cvtermprop'
class Expressionprop(models.Model):
expressionprop_id = models.IntegerField(primary_key=True)
expression = models.ForeignKey("Expression", related_name="%(class)s_expression", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'expressionprop'
class FeatureExpression(models.Model):
feature_expression_id = models.IntegerField(primary_key=True)
expression = models.ForeignKey("Expression", related_name="%(class)s_expression", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'feature_expression'
class FeatureExpressionprop(models.Model):
feature_expressionprop_id = models.IntegerField(primary_key=True)
feature_expression = models.ForeignKey("FeatureExpression", related_name="%(class)s_expression", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'feature_expressionprop'
class Expression(models.Model):
expression_id = models.IntegerField(primary_key=True)
uniquename = models.TextField(unique=True)
md5checksum = models.CharField(max_length=32, blank=True)
description = models.TextField(blank=True)
class Meta:
db_table = u'expression'
class Eimage(models.Model):
eimage_id = models.IntegerField(primary_key=True)
eimage_data = models.TextField(blank=True)
eimage_type = models.CharField(max_length=255)
image_uri = models.CharField(max_length=255, blank=True)
class Meta:
db_table = u'eimage'
class ExpressionImage(models.Model):
expression_image_id = models.IntegerField(primary_key=True)
expression = models.ForeignKey("Expression", related_name="%(class)s_expression", on_delete=models.PROTECT)
eimage = models.ForeignKey("Eimage", related_name="%(class)s_eimage", on_delete=models.PROTECT)
class Meta:
db_table = u'expression_image'
class Mageml(models.Model):
mageml_id = models.IntegerField(primary_key=True)
mage_package = models.TextField()
mage_ml = models.TextField()
class Meta:
db_table = u'mageml'
class Magedocumentation(models.Model):
magedocumentation_id = models.IntegerField(primary_key=True)
mageml = models.ForeignKey("Mageml", related_name="%(class)s_mageml", on_delete=models.PROTECT)
tableinfo = models.ForeignKey("Tableinfo", related_name="%(class)s_tableinfo", on_delete=models.PROTECT)
row_id = models.IntegerField()
mageidentifier = models.TextField()
class Meta:
db_table = u'magedocumentation'
class Channel(models.Model):
channel_id = models.IntegerField(primary_key=True)
name = models.TextField(unique=True)
definition = models.TextField()
class Meta:
db_table = u'channel'
class Protocolparam(models.Model):
protocolparam_id = models.IntegerField(primary_key=True)
protocol = models.ForeignKey("Protocol", related_name="%(class)s_protocol", on_delete=models.PROTECT)
name = models.TextField()
datatype = models.ForeignKey("Cvterm", related_name="%(class)s_datatype", null=True, blank=True, on_delete=models.PROTECT)
unittype = models.ForeignKey("Cvterm", related_name="%(class)s_unittype", null=True, blank=True, on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'protocolparam'
class Tableinfo(models.Model):
tableinfo_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=30, unique=True)
primary_key_column = models.CharField(max_length=30, blank=True)
is_view = models.IntegerField()
view_on_table_id = models.IntegerField(null=True, blank=True)
superclass_table_id = models.IntegerField(null=True, blank=True)
is_updateable = models.IntegerField()
modification_date = models.DateField()
class Meta:
db_table = u'tableinfo'
class Arraydesign(models.Model):
arraydesign_id = models.IntegerField(primary_key=True)
manufacturer = models.ForeignKey("Contact", related_name="%(class)s_manufacturer", on_delete=models.PROTECT)
platformtype = models.ForeignKey("Cvterm", related_name="%(class)s_platformtype", on_delete=models.PROTECT)
substratetype = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
protocol = models.ForeignKey("Protocol", null=True, blank=True, on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(unique=True)
version = models.TextField(blank=True)
description = models.TextField(blank=True)
array_dimensions = models.TextField(blank=True)
element_dimensions = models.TextField(blank=True)
num_of_elements = models.IntegerField(null=True, blank=True)
num_array_columns = models.IntegerField(null=True, blank=True)
num_array_rows = models.IntegerField(null=True, blank=True)
num_grid_columns = models.IntegerField(null=True, blank=True)
num_grid_rows = models.IntegerField(null=True, blank=True)
num_sub_columns = models.IntegerField(null=True, blank=True)
num_sub_rows = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'arraydesign'
class Assayprop(models.Model):
assayprop_id = models.IntegerField(primary_key=True)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'assayprop'
class Arraydesignprop(models.Model):
arraydesignprop_id = models.IntegerField(primary_key=True)
arraydesign = models.ForeignKey("Arraydesign", related_name="%(class)s_arraydesign", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'arraydesignprop'
class AssayProject(models.Model):
assay_project_id = models.IntegerField(primary_key=True)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
project = models.ForeignKey("Project", related_name="%(class)s_project", on_delete=models.PROTECT)
class Meta:
db_table = u'assay_project'
class Assay(models.Model):
assay_id = models.IntegerField(primary_key=True)
arraydesign = models.ForeignKey("Arraydesign", related_name="%(class)s_arraydesign", on_delete=models.PROTECT)
protocol = models.ForeignKey("Protocol", null=True, blank=True, on_delete=models.PROTECT)
assaydate = models.DateTimeField(null=True, blank=True)
arrayidentifier = models.TextField(blank=True)
arraybatchidentifier = models.TextField(blank=True)
operator = models.ForeignKey("Contact", related_name="%(class)s_operator", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(unique=True, blank=True)
description = models.TextField(blank=True)
class Meta:
db_table = u'assay'
class BiomaterialDbxref(models.Model):
biomaterial_dbxref_id = models.IntegerField(primary_key=True)
biomaterial = models.ForeignKey("Biomaterial", related_name="%(class)s_biomaterial", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
class Meta:
db_table = u'biomaterial_dbxref'
class BiomaterialRelationship(models.Model):
biomaterial_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Biomaterial", related_name="%(class)s_subject", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
object = models.ForeignKey("Biomaterial", related_name="%(class)s_object", on_delete=models.PROTECT)
class Meta:
db_table = u'biomaterial_relationship'
class Biomaterial(models.Model):
biomaterial_id = models.IntegerField(primary_key=True)
taxon = models.ForeignKey("Organism", null=True, blank=True, on_delete=models.PROTECT)
biosourceprovider = models.ForeignKey("Contact", null=True, blank=True, on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(unique=True, blank=True)
description = models.TextField(blank=True)
class Meta:
db_table = u'biomaterial'
class Biomaterialprop(models.Model):
biomaterialprop_id = models.IntegerField(primary_key=True)
biomaterial = models.ForeignKey("Biomaterial", related_name="%(class)s_biomaterial", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'biomaterialprop'
class Treatment(models.Model):
treatment_id = models.IntegerField(primary_key=True)
rank = models.IntegerField()
biomaterial = models.ForeignKey("Biomaterial", related_name="%(class)s_biomaterial", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
protocol = models.ForeignKey("Protocol", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(blank=True)
class Meta:
db_table = u'treatment'
class BiomaterialTreatment(models.Model):
biomaterial_treatment_id = models.IntegerField(primary_key=True)
biomaterial = models.ForeignKey("Biomaterial", related_name="%(class)s_biomaterial", on_delete=models.PROTECT)
treatment = models.ForeignKey("Treatment", related_name="%(class)s_treatment", on_delete=models.PROTECT)
unittype = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
value = models.FloatField(null=True, blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'biomaterial_treatment'
class Acquisitionprop(models.Model):
acquisitionprop_id = models.IntegerField(primary_key=True)
acquisition = models.ForeignKey("Acquisition", related_name="%(class)s_acquisition", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'acquisitionprop'
class AssayBiomaterial(models.Model):
assay_biomaterial_id = models.IntegerField(primary_key=True)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
biomaterial = models.ForeignKey("Biomaterial", related_name="%(class)s_biomaterial", on_delete=models.PROTECT)
channel = models.ForeignKey("Channel", null=True, blank=True, on_delete=models.PROTECT)
rank = models.IntegerField()
class Meta:
db_table = u'assay_biomaterial'
class AcquisitionRelationship(models.Model):
acquisition_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Acquisition", related_name="%(class)s_subject", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
object = models.ForeignKey("Acquisition", related_name="%(class)s_object", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'acquisition_relationship'
class Acquisition(models.Model):
acquisition_id = models.IntegerField(primary_key=True)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
protocol = models.ForeignKey("Protocol", null=True, blank=True, on_delete=models.PROTECT)
channel = models.ForeignKey("Channel", null=True, blank=True, on_delete=models.PROTECT)
acquisitiondate = models.DateTimeField(null=True, blank=True)
name = models.TextField(unique=True, blank=True)
uri = models.TextField(blank=True)
class Meta:
db_table = u'acquisition'
class Protocol(models.Model):
protocol_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", null=True, blank=True, on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(unique=True)
uri = models.TextField(blank=True)
protocoldescription = models.TextField(blank=True)
hardwaredescription = models.TextField(blank=True)
softwaredescription = models.TextField(blank=True)
class Meta:
db_table = u'protocol'
class Quantificationprop(models.Model):
quantificationprop_id = models.IntegerField(primary_key=True)
quantification = models.ForeignKey("Quantification", related_name="%(class)s_quantification", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'quantificationprop'
class Control(models.Model):
control_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
tableinfo = models.ForeignKey("Tableinfo", related_name="%(class)s_tableinfo", on_delete=models.PROTECT)
row_id = models.IntegerField()
name = models.TextField(blank=True)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'control'
class QuantificationRelationship(models.Model):
quantification_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Quantification", related_name="%(class)s_subject", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
object = models.ForeignKey("Quantification", related_name="%(class)s_object", on_delete=models.PROTECT)
class Meta:
db_table = u'quantification_relationship'
class Element(models.Model):
element_id = models.IntegerField(primary_key=True)
feature = models.ForeignKey("Feature", null=True, blank=True, on_delete=models.PROTECT)
arraydesign = models.ForeignKey("Arraydesign", related_name="%(class)s_arraydesign", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
db_table = u'element'
class Quantification(models.Model):
quantification_id = models.IntegerField(primary_key=True)
acquisition = models.ForeignKey("Acquisition", related_name="%(class)s_acquisition", on_delete=models.PROTECT)
operator = models.ForeignKey("Contact", null=True, blank=True, on_delete=models.PROTECT)
protocol = models.ForeignKey("Protocol", null=True, blank=True, on_delete=models.PROTECT)
analysis = models.ForeignKey("Analysis", related_name="%(class)s_analysis", on_delete=models.PROTECT)
quantificationdate = models.DateTimeField(null=True, blank=True)
name = models.TextField(blank=True)
uri = models.TextField(blank=True)
class Meta:
db_table = u'quantification'
class ElementRelationship(models.Model):
element_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Element", related_name="%(class)s_subject", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
object = models.ForeignKey("Element", related_name="%(class)s_object", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'element_relationship'
class StudyAssay(models.Model):
study_assay_id = models.IntegerField(primary_key=True)
study = models.ForeignKey("Study", related_name="%(class)s_study", on_delete=models.PROTECT)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
class Meta:
db_table = u'study_assay'
class Elementresult(models.Model):
elementresult_id = models.IntegerField(primary_key=True)
element = models.ForeignKey("Element", related_name="%(class)s_element", on_delete=models.PROTECT)
quantification = models.ForeignKey("Quantification", related_name="%(class)s_quantification", on_delete=models.PROTECT)
signal = models.FloatField()
class Meta:
db_table = u'elementresult'
class ElementresultRelationship(models.Model):
elementresult_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Elementresult", related_name="%(class)s_subject", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
object = models.ForeignKey("Elementresult", related_name="%(class)s_object", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'elementresult_relationship'
class Study(models.Model):
study_id = models.IntegerField(primary_key=True)
contact = models.ForeignKey("Contact", related_name="%(class)s_contact", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", null=True, blank=True, on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField(unique=True)
description = models.TextField(blank=True)
class Meta:
db_table = u'study'
class Studydesignprop(models.Model):
studydesignprop_id = models.IntegerField(primary_key=True)
studydesign = models.ForeignKey("Studydesign", related_name="%(class)s_studydesign", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'studydesignprop'
class Studydesign(models.Model):
studydesign_id = models.IntegerField(primary_key=True)
study = models.ForeignKey("Study", related_name="%(class)s_study", on_delete=models.PROTECT)
description = models.TextField(blank=True)
class Meta:
db_table = u'studydesign'
class Studyfactor(models.Model):
studyfactor_id = models.IntegerField(primary_key=True)
studydesign = models.ForeignKey("Studydesign", related_name="%(class)s_studydesign", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
name = models.TextField()
description = models.TextField(blank=True)
class Meta:
db_table = u'studyfactor'
class Studyfactorvalue(models.Model):
studyfactorvalue_id = models.IntegerField(primary_key=True)
studyfactor = models.ForeignKey("Studyfactor", related_name="%(class)s_studyfactor", on_delete=models.PROTECT)
assay = models.ForeignKey("Assay", related_name="%(class)s_assay", on_delete=models.PROTECT)
factorvalue = models.TextField(blank=True)
name = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'studyfactorvalue'
class Studyprop(models.Model):
studyprop_id = models.IntegerField(primary_key=True)
study = models.ForeignKey("Study", related_name="%(class)s_study", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'studyprop'
class StudypropFeature(models.Model):
studyprop_feature_id = models.IntegerField(primary_key=True)
studyprop = models.ForeignKey("Studyprop", related_name="%(class)s_studyprop", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", null=True, blank=True, on_delete=models.PROTECT)
class Meta:
db_table = u'studyprop_feature'
class StockpropPub(models.Model):
stockprop_pub_id = models.IntegerField(primary_key=True)
stockprop = models.ForeignKey("Stockprop", related_name="%(class)s_stockprop", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'stockprop_pub'
class StockPub(models.Model):
stock_pub_id = models.IntegerField(primary_key=True)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'stock_pub'
class Stockprop(models.Model):
stockprop_id = models.IntegerField(primary_key=True)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'stockprop'
class StockRelationship(models.Model):
stock_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("Stock", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("Stock", related_name="%(class)s_object", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'stock_relationship'
class StockRelationshipPub(models.Model):
stock_relationship_pub_id = models.IntegerField(primary_key=True)
stock_relationship = models.ForeignKey("StockRelationship", related_name="%(class)s_relationship", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'stock_relationship_pub'
class Stock(models.Model):
stock_id = models.IntegerField(primary_key=True)
dbxref = models.ForeignKey("Dbxref", null=True, blank=True, on_delete=models.PROTECT)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
name = models.CharField(max_length=255, blank=True)
uniquename = models.TextField()
description = models.TextField(blank=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
is_obsolete = models.BooleanField()
class Meta:
db_table = u'stock'
class StockDbxref(models.Model):
stock_dbxref_id = models.IntegerField(primary_key=True)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_current = models.BooleanField()
class Meta:
db_table = u'stock_dbxref'
class StockCvterm(models.Model):
stock_cvterm_id = models.IntegerField(primary_key=True)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'stock_cvterm'
class Stockcollection(models.Model):
stockcollection_id = models.IntegerField(primary_key=True)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
contact = models.ForeignKey("Contact", null=True, blank=True, on_delete=models.PROTECT)
name = models.CharField(max_length=255, blank=True)
uniquename = models.TextField()
class Meta:
db_table = u'stockcollection'
class StockGenotype(models.Model):
stock_genotype_id = models.IntegerField(primary_key=True)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
genotype = models.ForeignKey("Genotype", related_name="%(class)s_genotype", on_delete=models.PROTECT)
class Meta:
db_table = u'stock_genotype'
class Stockcollectionprop(models.Model):
stockcollectionprop_id = models.IntegerField(primary_key=True)
stockcollection = models.ForeignKey("Stockcollection", related_name="%(class)s_stockcollection", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'stockcollectionprop'
class StockcollectionStock(models.Model):
stockcollection_stock_id = models.IntegerField(primary_key=True)
stockcollection = models.ForeignKey("Stockcollection", related_name="%(class)s_stockcollection", on_delete=models.PROTECT)
stock = models.ForeignKey("Stock", related_name="%(class)s_stock", on_delete=models.PROTECT)
class Meta:
db_table = u'stockcollection_stock'
class LibrarySynonym(models.Model):
library_synonym_id = models.IntegerField(primary_key=True)
synonym = models.ForeignKey("Synonym", related_name="%(class)s_synonym", on_delete=models.PROTECT)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
is_current = models.BooleanField()
is_internal = models.BooleanField()
class Meta:
db_table = u'library_synonym'
class Libraryprop(models.Model):
libraryprop_id = models.IntegerField(primary_key=True)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'libraryprop'
class LibraryPub(models.Model):
library_pub_id = models.IntegerField(primary_key=True)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'library_pub'
class LibrarypropPub(models.Model):
libraryprop_pub_id = models.IntegerField(primary_key=True)
libraryprop = models.ForeignKey("Libraryprop", related_name="%(class)s_libraryprop", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'libraryprop_pub'
class LibraryCvterm(models.Model):
library_cvterm_id = models.IntegerField(primary_key=True)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'library_cvterm'
class Synonym(models.Model):
synonym_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
synonym_sgml = models.CharField(max_length=255)
class Meta:
db_table = u'synonym'
class Library(models.Model):
library_id = models.IntegerField(primary_key=True)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
name = models.CharField(max_length=255, blank=True)
uniquename = models.TextField()
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
is_obsolete = models.IntegerField()
timeaccessioned = models.DateTimeField()
timelastmodified = models.DateTimeField()
class Meta:
db_table = u'library'
class LibraryFeature(models.Model):
library_feature_id = models.IntegerField(primary_key=True)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
class Meta:
db_table = u'library_feature'
class LibraryDbxref(models.Model):
library_dbxref_id = models.IntegerField(primary_key=True)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_current = models.BooleanField()
class Meta:
db_table = u'library_dbxref'
class CellLineSynonym(models.Model):
cell_line_synonym_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
synonym = models.ForeignKey("Synonym", related_name="%(class)s_synonym", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
is_current = models.BooleanField()
is_internal = models.BooleanField()
class Meta:
db_table = u'cell_line_synonym'
class CellLineCvterm(models.Model):
cell_line_cvterm_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
cvterm = models.ForeignKey("Cvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
rank = models.IntegerField()
class Meta:
db_table = u'cell_line_cvterm'
class CellLineDbxref(models.Model):
cell_line_dbxref_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
dbxref = models.ForeignKey("Dbxref", related_name="%(class)s_dbxref", on_delete=models.PROTECT)
is_current = models.BooleanField()
class Meta:
db_table = u'cell_line_dbxref'
class CellLineRelationship(models.Model):
cell_line_relationship_id = models.IntegerField(primary_key=True)
subject = models.ForeignKey("CellLine", related_name="%(class)s_subject", on_delete=models.PROTECT)
object = models.ForeignKey("CellLine", related_name="%(class)s_object", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
class Meta:
db_table = u'cell_line_relationship'
class CellLine(models.Model):
cell_line_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, blank=True)
uniquename = models.CharField(max_length=255)
organism = models.ForeignKey("Organism", related_name="%(class)s_organism", on_delete=models.PROTECT)
timeaccessioned = models.DateTimeField()
timelastmodified = models.DateTimeField()
class Meta:
db_table = u'cell_line'
class CellLineprop(models.Model):
cell_lineprop_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'cell_lineprop'
class CellLinepropPub(models.Model):
cell_lineprop_pub_id = models.IntegerField(primary_key=True)
cell_lineprop = models.ForeignKey("CellLineprop", related_name="%(class)s_lineprop", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'cell_lineprop_pub'
class FpKey(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
pkey = models.CharField(max_length=1024, blank=True)
value = models.TextField(blank=True)
class Meta:
db_table = u'fp_key'
class CellLineLibrary(models.Model):
cell_line_library_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
library = models.ForeignKey("Library", related_name="%(class)s_library", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'cell_line_library'
class Gffatts(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
type = models.TextField(blank=True)
attribute = models.CharField(max_length=100, blank=True) # Changed max length from -1 to 100
class Meta:
db_table = u'gffatts'
class Gff3Atts(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
type = models.TextField(blank=True)
attribute = models.CharField(max_length=100, blank=True) # Changed max length from -1 to 100
class Meta:
db_table = u'gff3atts'
class CellLineFeature(models.Model):
cell_line_feature_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
feature = models.ForeignKey("Feature", related_name="%(class)s_feature", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'cell_line_feature'
class Gff3View(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
ref = models.CharField(max_length=255, blank=True)
source = models.CharField(max_length=255, blank=True)
type = models.CharField(max_length=1024, blank=True)
fstart = models.IntegerField(null=True, blank=True)
fend = models.IntegerField(null=True, blank=True)
score = models.FloatField(null=True, blank=True)
strand = models.SmallIntegerField(null=True, blank=True)
phase = models.IntegerField(null=True, blank=True)
seqlen = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
organism_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'gff3view'
class AllFeatureNames(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
organism_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'all_feature_names'
class CellLineCvtermprop(models.Model):
cell_line_cvtermprop_id = models.IntegerField(primary_key=True)
cell_line_cvterm = models.ForeignKey("CellLineCvterm", related_name="%(class)s_cvterm", on_delete=models.PROTECT)
type = models.ForeignKey("Cvterm", related_name="%(class)s_type", on_delete=models.PROTECT)
value = models.TextField(blank=True)
rank = models.IntegerField()
class Meta:
db_table = u'cell_line_cvtermprop'
class Dfeatureloc(models.Model):
featureloc_id = models.IntegerField(null=True, blank=True)
feature_id = models.IntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
nbeg = models.IntegerField(null=True, blank=True)
is_nbeg_partial = models.NullBooleanField(null=True, blank=True)
nend = models.IntegerField(null=True, blank=True)
is_nend_partial = models.NullBooleanField(null=True, blank=True)
strand = models.SmallIntegerField(null=True, blank=True)
phase = models.IntegerField(null=True, blank=True)
residue_info = models.TextField(blank=True)
locgroup = models.IntegerField(null=True, blank=True)
rank = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'dfeatureloc'
class FType(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
dbxref_id = models.IntegerField(null=True, blank=True)
type = models.CharField(max_length=1024, blank=True)
residues = models.TextField(blank=True)
seqlen = models.IntegerField(null=True, blank=True)
md5checksum = models.CharField(max_length=32, blank=True)
type_id = models.IntegerField(null=True, blank=True)
timeaccessioned = models.DateTimeField(null=True, blank=True)
timelastmodified = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'f_type'
class CellLinePub(models.Model):
cell_line_pub_id = models.IntegerField(primary_key=True)
cell_line = models.ForeignKey("CellLine", related_name="%(class)s_line", on_delete=models.PROTECT)
pub = models.ForeignKey("Pub", related_name="%(class)s_pub", on_delete=models.PROTECT)
class Meta:
db_table = u'cell_line_pub'
class FeatureMeets(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_meets'
class FeatureMeetsOnSameStrand(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_meets_on_same_strand'
class FnrType(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
dbxref_id = models.IntegerField(null=True, blank=True)
type = models.CharField(max_length=1024, blank=True)
residues = models.TextField(blank=True)
seqlen = models.IntegerField(null=True, blank=True)
md5checksum = models.CharField(max_length=32, blank=True)
type_id = models.IntegerField(null=True, blank=True)
timeaccessioned = models.DateTimeField(null=True, blank=True)
timelastmodified = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'fnr_type'
class FLoc(models.Model):
feature_id = models.IntegerField(null=True, blank=True)
name = models.CharField(max_length=255, blank=True)
dbxref_id = models.IntegerField(null=True, blank=True)
nbeg = models.IntegerField(null=True, blank=True)
nend = models.IntegerField(null=True, blank=True)
strand = models.SmallIntegerField(null=True, blank=True)
class Meta:
db_table = u'f_loc'
class FeatureDisjoint(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_disjoint'
class FeatureUnion(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
subject_strand = models.SmallIntegerField(null=True, blank=True)
object_strand = models.SmallIntegerField(null=True, blank=True)
fmin = models.IntegerField(null=True, blank=True)
fmax = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_union'
class FeatureIntersection(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
subject_strand = models.SmallIntegerField(null=True, blank=True)
object_strand = models.SmallIntegerField(null=True, blank=True)
fmin = models.IntegerField(null=True, blank=True)
fmax = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_intersection'
class FeatureDifference(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
srcfeature_id = models.SmallIntegerField(null=True, blank=True)
fmin = models.IntegerField(null=True, blank=True)
fmax = models.IntegerField(null=True, blank=True)
strand = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_difference'
class FeatureDistance(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
srcfeature_id = models.IntegerField(null=True, blank=True)
subject_strand = models.SmallIntegerField(null=True, blank=True)
object_strand = models.SmallIntegerField(null=True, blank=True)
distance = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_distance'
class FeatureContains(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'feature_contains'
class FeaturesetMeets(models.Model):
subject_id = models.IntegerField(null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'featureset_meets'
class GeneOrder(models.Model):
gene_order_id = models.IntegerField(primary_key=True)
chromosome = models.ForeignKey("Feature", related_name="%(class)s_chromosome", on_delete=models.PROTECT)
gene = models.ForeignKey("Feature", related_name="%(class)s_gene", on_delete=models.PROTECT)
number = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'gene_order'
class GeneFamilyAssignment(models.Model):
gene_family_assignment_id = models.IntegerField(primary_key=True)
gene = models.ForeignKey("Feature", related_name="%(class)s_gene", on_delete=models.PROTECT)
family_label = models.TextField(blank=False)
class Meta:
db_table = u'gene_family_assignment'
|
import time
from salad.exceptions import TimeoutException
POLL_FREQUENCY = 0.5  # How long to sleep in between calls to the method
class SaladWaiter(object):
def __init__(self, timeout, poll_frequency=POLL_FREQUENCY,
ignored_exceptions=None):
"""Constructor
Args:
- timeout - Number of seconds before timing out
- poll_frequency - sleep interval between calls
By default, it is 0.5 second.
- ignored_exceptions - iterable structure of exception classes
ignored during calls.
Example:
            from salad.waiter import SaladWaiter
            element = SaladWaiter(10).until(some_method, arg1, arg2, ...)
"""
self._timeout = timeout
self._poll = poll_frequency
# avoid the divide by zero
if self._poll == 0:
self._poll = POLL_FREQUENCY
exceptions = []
if ignored_exceptions is not None:
try:
exceptions.extend(iter(ignored_exceptions))
except TypeError: # ignored_exceptions is not iterable
exceptions.append(ignored_exceptions)
self._ignored_exceptions = tuple(exceptions)
def _until(self, negate, method, *args):
"""The provided method should return either True or False.
It is then called until proper return value appears according to negate
OR until timeout happens"""
end_time = time.time() + self._timeout
while(True):
try:
value = method(*args)
if not negate:
if value:
return value
else:
if not value:
return value
except self._ignored_exceptions:
pass
time.sleep(self._poll)
if(time.time() > end_time):
break
        raise TimeoutException("%s did not return expected return value "
                               "within %s seconds." %
                               (method.__name__, self._timeout))
def until(self, method, *args):
return self._until(False, method, *args)
def until_not(self, method, *args):
return self._until(True, method, *args)
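if __name__ == "__main__":
    # Hedged usage sketch: poll a trivial, made-up condition until it becomes
    # true.  A real caller would pass a method that inspects the page or the
    # system under test instead of this toy closure.
    _start = time.time()

    def _elapsed_at_least(seconds):
        return time.time() - _start >= seconds

    waiter = SaladWaiter(timeout=5, poll_frequency=0.1)
    print(waiter.until(_elapsed_at_least, 0.3))  # True once ~0.3s have passed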
|
import math
from operator import itemgetter
#self.median and self.mean become stale if add is called after get_median or get_mean;
#as a result get_mean_deviation and get_relative_deviation will give wrong answers
class countmean:
def __init__(self):
self.n = {}
self.sum = 0
self.count = 0
self.median = None
self.mean = None
def add(self, num):
try: self.n[num]+= 1
except KeyError: self.n[num] = 1
self.sum+= num
self.count+= 1
#returns the high median
def get_median(self):
s = sorted(self.n.items(), key = itemgetter(0))
start = 0
startamount = s[start][1]
end = len(s) - 1
endamount = s[end][1]
while start < end:
amount = min(startamount, endamount)
startamount-= amount
endamount-= amount
if(startamount == 0):
start+= 1
startamount = s[start][1]
if(endamount == 0):
end-= 1
endamount = s[end][1]
self.median = s[start][0]
return self.median
def get_mean(self):
self.mean = float(self.sum) / float(self.count)
return self.mean
#returns the mean deviation about median
def get_mean_deviation(self):
        if self.median is None: self.get_median()
return float(sum(abs(i - self.median) * self.n[i] for i in self.n)) / float(self.count)
#returns the relative standard deviation
def get_relative_deviation(self):
        if self.mean is None: self.get_mean()
if self.mean == 0: return 0
return 100.0 * math.sqrt(float(sum((i - self.mean) ** 2 * self.n[i] for i in self.n)) / float(self.count)) / self.mean
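if __name__ == "__main__":
    # Hedged usage sketch: feed a handful of values, then query the summary
    # statistics.  All add() calls must happen before the get_* calls (see the
    # note at the top of the class).
    cm = countmean()
    for value in [2, 4, 4, 4, 5, 5, 7, 9]:
        cm.add(value)
    print(cm.get_median())              # 5    (high median of the middle values 4 and 5)
    print(cm.get_mean())                # 5.0
    print(cm.get_mean_deviation())      # 1.5  (mean absolute deviation about the median)
    print(cm.get_relative_deviation())  # 40.0 (relative standard deviation, in percent)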
|
"""Defines compatibility quirks for Python 2.7."""
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
import functools
import logging
import warnings
def add_metaclass(metaclass):
"""
Class decorator for creating a class with a metaclass.
Borrowed from `six` module.
"""
@functools.wraps(metaclass)
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
Borrowed from Py3 `textwrap` module.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
def deprecated_func(func):
"""Deprecates a function, printing a warning on the first usage."""
# We use a mutable container here to work around Py2's lack of
# the `nonlocal` keyword.
first_usage = [True]
@functools.wraps(func)
def wrapper(*args, **kwargs):
if first_usage[0]:
warnings.warn(
"Call to deprecated function {}.".format(func.__name__),
DeprecationWarning,
)
first_usage[0] = False
return func(*args, **kwargs)
return wrapper
if sys.version_info[0] >= 3:
def byte2int(x):
return x
elif sys.version_info[0] == 2:
def byte2int(x):
return ord(x) if type(x) == str else x
else:
raise Exception("Unsupported Python version")
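if __name__ == "__main__":
    # Hedged usage sketch for the helpers above; ExampleMeta, Example and
    # greet are throwaway names invented for this demo.
    class ExampleMeta(type):
        pass

    @add_metaclass(ExampleMeta)
    class Example(object):
        pass

    print(type(Example) is ExampleMeta)       # True on both Py2 and Py3

    print(indent("one\ntwo\n\nthree", "  "))  # the blank line stays unprefixed

    @deprecated_func
    def greet():
        return "hello"

    greet()  # warns on first use (visibility depends on the warnings filters)
    print(byte2int(b"a"[0]))                  # 97 on both Py2 and Py3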
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2013 Camptocamp
# Copyright 2009-2013 Akretion,
# Author: Emmanuel Samyn, Raphaël Valyi, Sébastien Beau, Joel Grand-Guillaume
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'RMA Claims Advance Location',
'version': '1.0',
'category': 'Generic Modules/CRM & SRM',
'depends': ['crm_claim_rma'
],
'author': 'Akretion',
'license': 'AGPL-3',
'website': 'http://www.akretion.com',
'description': """
RMA Claim Advance Location
==========================
This module adds the following locations to warehouses:
 * Carrier Loss
 * RMA
 * Breakage Loss
 * Refurbish
 * Mistake Loss
It also adds various wizards on incoming deliveries that let you move your goods easily into those
new locations from a done reception.
Using this module makes the logistics flow of returns a bit more complex:
 * Returned products go into the RMA location with an incoming shipment
 * From the incoming shipment, forward them to other places (stock, loss, ...)
WARNING: Use with caution, this module is not yet completely debugged and is still awaiting fixes from its author.
""",
'images': [],
'demo': [],
'data': [
'wizard/claim_make_picking_from_picking_view.xml',
'wizard/claim_make_picking_view.xml',
'stock_view.xml',
'stock_data.xml',
'claim_rma_view.xml',
'mrp_repair_view.xml'
],
'installable': True,
'application': True,
}
|
import urllib2,os
import urllib
# config
# the name of the file that contains the URLs of all the songs to be downloaded.
mp3_urls = "temp.mp3"
# downloaded music is saved in this directory
save_dir = "songs"
# make sure the target directory exists before downloading into it
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
# Read urls of mp3 files from file mp3_urls and download the corresponding mp3
# in save_dir directory
def download_song_from_url(url):
file_name = url.split('/')[-1]
url = urllib.quote(url,":/")
print url
req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
con = urllib2.urlopen( req )
u = con
f = open("./"+save_dir+"/"+file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
file_list = open(mp3_urls, "r")
for line in file_list:
    try:
        download_song_from_url(line.strip())
        print "Done."
    except Exception, e:
        # raise
        print e
file_list.close()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .server_properties_for_create import ServerPropertiesForCreate
class ServerPropertiesForRestore(ServerPropertiesForCreate):
"""The properties to a new server by restoring from a backup.
:param storage_mb: The maximum storage allowed for a server.
:type storage_mb: long
:param version: Server version. Possible values include: '9.5', '9.6'
:type version: str or :class:`ServerVersion
<azure.mgmt.rdbms.postgresql.models.ServerVersion>`
:param ssl_enforcement: Enable ssl enforcement or not when connect to
server. Possible values include: 'Enabled', 'Disabled'
:type ssl_enforcement: str or :class:`SslEnforcementEnum
<azure.mgmt.rdbms.postgresql.models.SslEnforcementEnum>`
:param create_mode: Polymorphic Discriminator
:type create_mode: str
:param source_server_id: The source server id to restore from.
:type source_server_id: str
:param restore_point_in_time: Restore point creation time (ISO8601
format), specifying the time to restore from.
:type restore_point_in_time: datetime
"""
_validation = {
'storage_mb': {'minimum': 1024},
'create_mode': {'required': True},
'source_server_id': {'required': True},
'restore_point_in_time': {'required': True},
}
_attribute_map = {
'storage_mb': {'key': 'storageMB', 'type': 'long'},
'version': {'key': 'version', 'type': 'str'},
'ssl_enforcement': {'key': 'sslEnforcement', 'type': 'SslEnforcementEnum'},
'create_mode': {'key': 'createMode', 'type': 'str'},
'source_server_id': {'key': 'sourceServerId', 'type': 'str'},
'restore_point_in_time': {'key': 'restorePointInTime', 'type': 'iso-8601'},
}
def __init__(self, source_server_id, restore_point_in_time, storage_mb=None, version=None, ssl_enforcement=None):
super(ServerPropertiesForRestore, self).__init__(storage_mb=storage_mb, version=version, ssl_enforcement=ssl_enforcement)
self.source_server_id = source_server_id
self.restore_point_in_time = restore_point_in_time
self.create_mode = 'PointInTimeRestore'
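if __name__ == "__main__":
    # Hedged usage sketch: construction only, no Azure request is issued, and
    # the resource id and timestamp below are made-up placeholders.
    import datetime
    props = ServerPropertiesForRestore(
        source_server_id="/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff"
                         "/resourceGroups/testrg/providers"
                         "/Microsoft.DBforPostgreSQL/servers/sourceserver",
        restore_point_in_time=datetime.datetime(2017, 12, 14, 0, 0, 37),
        storage_mb=2048,
    )
    print(props.create_mode)  # 'PointInTimeRestore'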
|
""" Test Factories. """
import datetime
from uuid import uuid4
import factory
from django.contrib import auth
from django.utils.timezone import now
from factory.django import DjangoModelFactory
from pytz import UTC
from submissions import models
User = auth.get_user_model()
class UserFactory(DjangoModelFactory):
""" Copied from edx-platform/common/djangoapps/student/tests/factories.py """
class Meta:
model = User
django_get_or_create = ('email', 'username')
_DEFAULT_PASSWORD = 'test'
username = factory.Sequence('robot{}'.format)
email = factory.Sequence('robot+test+{}@edx.org'.format)
password = factory.PostGenerationMethodCall('set_password', _DEFAULT_PASSWORD)
first_name = factory.Sequence('Robot{}'.format)
last_name = 'Test'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2012, 1, 1, tzinfo=UTC)
date_joined = datetime.datetime(2011, 1, 1, tzinfo=UTC)
class StudentItemFactory(DjangoModelFactory):
""" A Factory for the StudentItem model. """
class Meta:
model = models.StudentItem
student_id = factory.Faker('sha1')
course_id = factory.Faker('sha1')
item_id = factory.Faker('sha1')
item_type = 'openassessment'
class SubmissionFactory(DjangoModelFactory):
""" A factory for the Submission model. """
class Meta:
model = models.Submission
uuid = factory.LazyFunction(uuid4)
student_item = factory.SubFactory(StudentItemFactory)
attempt_number = 1
submitted_at = datetime.datetime.now()
created_at = datetime.datetime.now()
answer = {}
status = models.ACTIVE
class TeamSubmissionFactory(DjangoModelFactory):
""" A factory for TeamSubmission model """
class Meta:
model = models.TeamSubmission
uuid = factory.LazyFunction(uuid4)
attempt_number = 1
submitted_at = now()
course_id = factory.Faker('sha1')
item_id = factory.Faker('sha1')
team_id = factory.Faker('sha1')
submitted_by = factory.SubFactory(UserFactory)
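if __name__ == "__main__":
    # Hedged usage sketch: .build() creates unsaved instances, so no database
    # is touched; Django settings must already be configured, as they have to
    # be for the model imports at the top of this file to work at all.
    student_item = StudentItemFactory.build(item_type='openassessment')
    submission = SubmissionFactory.build(student_item=student_item,
                                         attempt_number=2)
    print(submission.attempt_number)  # 2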
|
import numpy as np
from scipy import sparse
# norm comes from numpy: the vectors normalised below are dense ndarrays,
# which scipy.sparse.linalg.norm rejects.
from numpy.linalg import norm
from scipy.sparse.linalg import eigs
from scipy.cluster import vq
def ncutW(W, num_eigs=10, kmeans_iters=10, offset = 0.5):
"""Run the normalized cut algorithm on the affinity matrix, W.
(as implemented in Ng, Jordan, and Weiss, 2002)
Parameters
----------
W : scipy sparse matrix
Square matrix with high values for edges to be preserved, and low
values for edges to be cut.
num_eigs : int, optional
Number of eigenvectors of the affinity matrix to use for clustering.
kmeans_iters : int, optional
Number of iterations of the k-means algorithm to run when clustering
eigenvectors.
offset : float, optional
Diagonal offset used to stabilise the eigenvector computation.
Returns
-------
labels : array of int
`labels[i]` is an integer value mapping node/row `i` to the cluster
ID `labels[i]`.
eigenvectors : list of array of float
The computed eigenvectors of `W + offset * I`, where `I` is the
identity matrix of same size as `W`.
eigenvalues : array of float
The corresponding eigenvalues.
"""
n, m = W.shape
# Add an offset in case some rows are zero
# We also add the offset below to the diagonal matrix. See (Yu, 2001),
# "Understanding Popout through Repulsion" for more information. This
# helps to stabilize the eigenvector computation.
W = W + sparse.diags(np.full(n, offset))
d = np.ravel(W.sum(axis=1))
Dinv2 = sparse.diags(1 / (np.sqrt(d) + offset*np.ones(n)))
P = Dinv2 @ W @ Dinv2
# Get the eigenvectors and sort by eigenvalue
eigvals, U = eigs(P, num_eigs, which='LR')
eigvals = np.real(eigvals) # it should be real anyway
U = np.real(U)
ind = np.argsort(eigvals)[::-1]
eigvals = eigvals[ind]
U = U[:, ind]
# Normalize
for i in range(n):
U[i, :] /= norm(U[i, :])
# Cluster them into labels, running k-means multiple times
labels_list = []
distortion_list = []
for _iternum in range(kmeans_iters):
# Cluster
centroid, labels = vq.kmeans2(U, num_eigs, minit='points')
# Calculate distortion
distortion = 0
for j in range(num_eigs):
numvals = np.sum(labels == j)
if numvals == 0:
continue
distortion += np.mean([norm(v - centroid[j])**2 for (i, v) in
enumerate(U) if labels[i] == j])
# Save values
labels_list.append(labels)
distortion_list.append(distortion)
# Use lowest distortion
labels = labels_list[np.argmin(distortion_list)]
return labels, U, eigvals
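if __name__ == "__main__":
    # Hedged usage sketch: a tiny affinity matrix with two obvious clusters
    # (nodes 0-2 and 3-5).  With num_eigs=2 the returned labels separate the
    # two blocks, up to an arbitrary permutation of the cluster ids; some
    # k-means restarts may warn about an empty cluster and are discarded by
    # the distortion check above.
    np.random.seed(0)  # make the k-means restarts reproducible
    block = np.ones((3, 3))
    W_demo = sparse.csr_matrix(sparse.block_diag([block, block]))
    labels, vectors, values = ncutW(W_demo, num_eigs=2, kmeans_iters=5)
    print(labels)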
|
# -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2012, 2013 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
from wtforms import RadioField
from wtforms.widgets import RadioInput, HTMLString
from invenio.modules.deposit.field_base import WebDepositField
__all__ = ['AccessRightField']
ACCESS_RIGHTS_CHOICES = [
('open', 'Open Access'),
('embargoed', 'Embargoed Access'),
('restricted', 'Restricted Access'),
('closed', 'Closed Access'),
]
ACCESS_RIGHTS_ICONS = {
'open': 'fa fa-unlock fa-fw',
'closed': 'fa fa-lock fa-fw',
'restricted': 'fa fa-warning fa-fw',
'embargoed': 'fa fa-warning fa-fw',
}
class InlineListWidget(object):
"""
    Renders a list of fields as an inline list.
This is used for fields which encapsulate many inner fields as subfields.
The widget will try to iterate the field to get access to the subfields and
call them to render them.
If `prefix_label` is set, the subfield's label is printed before the field,
otherwise afterwards. The latter is useful for iterating radios or
checkboxes.
"""
def __init__(self, prefix_label=True, inline=True):
self.prefix_label = prefix_label
self.inline = " inline" if inline else ""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
html = []
for subfield in field:
if self.prefix_label:
html.append(u'<label class="%s%s">%s %s</label>' % (subfield.widget.input_type, self.inline, subfield.label.text, subfield()))
else:
html.append(u'<label class="%s%s">%s %s</label>' % (subfield.widget.input_type, self.inline, subfield(), subfield.label.text))
return HTMLString(u''.join(html))
class IconRadioInput(RadioInput):
"""
Render a single radio button with icon.
This widget is most commonly used in conjunction with ListWidget or some
other listing, as singular radio buttons are not very useful.
"""
input_type = 'radio'
def __init__(self, icons={}, **kwargs):
self.choices_icons = icons
super(IconRadioInput, self).__init__(**kwargs)
def __call__(self, field, **kwargs):
if field.checked:
kwargs['checked'] = u'checked'
html = super(IconRadioInput, self).__call__(field, **kwargs)
icon = self.choices_icons.get(field._value(), '')
if icon:
html = '%s <i class="%s"></i>' % (html, icon)
return html
def access_right_processor(form, field, submit=False, fields=None):
"""
Enable/disable fields based on access right value.
"""
form.embargo_date.flags.hidden = True
form.embargo_date.flags.disabled = True
form.license.flags.hidden = True
form.license.flags.disabled = True
form.access_conditions.flags.hidden = True
form.access_conditions.flags.disabled = True
if field.data == 'embargoed':
form.embargo_date.flags.hidden = False
form.embargo_date.flags.disabled = False
if field.data == 'restricted':
form.access_conditions.flags.hidden = False
form.access_conditions.flags.disabled = False
if field.data in ['open', 'embargoed']:
form.license.flags.hidden = False
form.license.flags.disabled = False
class AccessRightField(WebDepositField, RadioField):
widget = InlineListWidget(prefix_label=False, inline=False)
option_widget = IconRadioInput(icons=ACCESS_RIGHTS_ICONS)
def __init__(self, **kwargs):
defaults = dict(
choices=ACCESS_RIGHTS_CHOICES,
processors=[access_right_processor],
)
defaults.update(kwargs)
super(AccessRightField, self).__init__(**defaults)
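if __name__ == "__main__":
    # Hedged rendering sketch: the widgets above only need plain WTForms, so
    # this demo wires them onto a vanilla RadioField instead of the full
    # AccessRightField (whose processors need the Invenio deposit machinery).
    from wtforms import Form

    class _DemoForm(Form):
        access_right = RadioField(
            choices=ACCESS_RIGHTS_CHOICES,
            widget=InlineListWidget(prefix_label=False),
            option_widget=IconRadioInput(icons=ACCESS_RIGHTS_ICONS),
        )

    print(_DemoForm(access_right='open').access_right())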
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'openstackdocstheme',
'sphinxcontrib.rsvgconverter',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cloudkitty-dashboard'
copyright = u'2014, Objectif Libre'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# -- Options for LaTeX output ---------------------------------------------
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
latex_domain_indices = False
latex_elements = {
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# NOTE: Specify toctree_only=True for a better document structure of
# the generated PDF file.
latex_documents = [
(
'index',
'doc-%s.tex' % project,
u'Cloudkitty-Dashboard Documentation',
u'OpenStack Foundation', 'howto', True
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
'index',
project,
u'%s Documentation' % project,
[u'Objectif Libre'],
1
),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
'index',
project,
u'%s Documentation' % project,
u'Objectif Libre',
project,
'CloudKitty Horizon Plugin',
'Miscellaneous'
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_repo_name = 'openstack/cloudkitty-dashboard'
openstackdocs_pdf_link = True
openstackdocs_auto_name = False
openstackdocs_use_storyboard = True
|
import math
import pandas as pd
from sklearn import preprocessing
# A Note on SKLearn .transform() calls:
#
# Any time you transform your data, you lose the column header names.
# This actually makes complete sense. There are essentially two types
# of transformations, those that change the scale of your features,
# and those that change your features entire. Changing the scale would
# be like changing centimeters to inches. Changing the features would
# be like using PCA to reduce 300 columns to 30. In either case, the
# original column's units have been altered or no longer exist, so it's
# up to you to rename your columns after ANY transformation. Due to
# this, SKLearn returns an NDArray from *transform() calls.
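# For example (illustrative), a transform's ndarray output can be given its
# labels back by wrapping it in a new DataFrame:
#     arr = preprocessing.StandardScaler().fit_transform(df)  # ndarray
#     df2 = pd.DataFrame(arr, columns=df.columns)             # labeled again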
def scaleFeatures(df):
# SKLearn contains many methods for transforming your features by
# scaling them (this is a type of pre-processing):
# RobustScaler, Normalizer, MinMaxScaler, MaxAbsScaler, StandardScaler...
# http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
#
# However in order to be effective at PCA, there are a few requirements
# that must be met, and which will drive the selection of your scaler.
    # PCA requires that your data be standardized -- in other words, its mean is
# equal to 0, and it has ~unit variance.
#
# SKLearn's regular Normalizer doesn't zero out the mean of your data,
# it only clamps it, so it's inappropriate to use here (depending on
# your data). MinMaxScaler and MaxAbsScaler both fail to set a unit
# variance, so you won't be using them either. RobustScaler can work,
# again depending on your data (watch for outliers). For these reasons
# we're going to use the StandardScaler. Get familiar with it by visiting
# these two websites:
#
# http://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-scaler
#
# http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler
#
# ---------
# Feature scaling is the type of transformation that only changes the
    # scale and not the number of features, so we'll use the original dataset
# column names. However we'll keep in mind that the _units_ have been
# altered:
scaled = preprocessing.StandardScaler().fit_transform(df)
scaled = pd.DataFrame(scaled, columns=df.columns)
print "New Variances:\n", scaled.var()
print "New Describe:\n", scaled.describe()
return scaled
def drawVectors(transformed_features, components_, columns, plt, scaled):
if not scaled:
return plt.axes() # No cheating ;-)
num_columns = len(columns)
    # This function will project your *original* features (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## visualize projections
    # Sort each column by its length. These are your *original*
# columns, not the principal components.
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75)
return ax
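# Minimal usage sketch (illustrative, not part of the assignment): assumes a
# hypothetical all-numeric CSV called 'data.csv'; the scaler -> PCA -> plot
# wiring is the point, not the file name.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from sklearn.decomposition import PCA
    df = pd.read_csv('data.csv')
    scaled_df = scaleFeatures(df)
    pca = PCA(n_components=2)
    T = pca.fit_transform(scaled_df)
    ax = drawVectors(T, pca.components_, scaled_df.columns.values, plt, scaled=True)
    T = pd.DataFrame(T, columns=['component1', 'component2'])
    T.plot.scatter(x='component1', y='component2', alpha=0.75, ax=ax)
    plt.show()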
|
# coding: utf-8
import logging
import os
from ppyt import const
from ppyt.commands import CommandBase
from ppyt.models.orm import Setting
logger = logging.getLogger(__name__)
plogger = logging.getLogger('print')
class Command(CommandBase):
"""Settingを更新を行います。"""
def _add_options(self, parser):
        # When specified, shows the currently registered settings.
parser.add_argument('-l', '--list', action='store_true')
def _execute(self, options):
        Setting.register_initials() # Register the initial values.
if options.list is True:
            plogger.info(os.linesep + '# Settings list' + os.linesep)
for row in Setting.get_list():
plogger.info('- Key: {}, Value: {}'.format(row.key, row.value))
plogger.info(os.linesep)
else:
selected_key = self.__select_key()
selected_value = self.__select_value(selected_key)
logger.debug('selected_value: %s' % selected_value)
            # Update the Setting.
Setting.update_value(selected_key, selected_value)
            plogger.info('Updated key [%s] to value [%s].' % (selected_key, selected_value))
def __select_key(self):
"""Key選択を表示して、入力を受け取ります。"""
        plogger.info('Select the key to update by entering its number.' + os.linesep)
key_map = {(i + 1): k for i, k in enumerate(Setting.get_keys())}
return self.__get_number(key_map)
def __select_value(self, key):
"""value選択を表示し、入力を受け取ります。"""
current_value = Setting.get_value(key)
        plogger.info('Select the new value by entering its number.' + os.linesep)
if key == Setting.KEY_RULEFILE:
values = sorted([os.path.splitext(fn)[0] for fn in os.listdir(const.RULEFILE_DIR)
if os.path.splitext(fn)[1].lower() == '.json'])
        elif key == Setting.KEY_FILTERFILE: # filter file
values = sorted([os.path.splitext(fn)[0] for fn in os.listdir(const.FILTERFILE_DIR)
if os.path.splitext(fn)[1].lower() == '.json'])
        # Build a dict mapping selectable numbers (keys) to setting values.
value_map = {}
        if current_value in values: # the current value is among the selectable values
            values.remove(current_value) # remove it from the list
            value_map[0] = current_value # and add it back as choice 0 (the current value)
        # Add the remaining values under keys 1 and up.
value_map.update({(i + 1): v for i, v in enumerate(values)})
return self.__get_number(value_map)
def __get_number(self, selectable_map):
"""入力を受け付け、入力された内容を取得します。"""
str_numbers = [str(v) for v in selectable_map.keys()]
        # Show the selectable values.
        plogger.info(os.linesep.join(['- {}: {} {}'.format(i, v, '(current)' if i == 0 else '')
for i, v in selectable_map.items()]) + os.linesep)
if len(str_numbers) == 0:
from ppyt.exceptions import CommandError
            raise CommandError('There are no selectable values.')
        # Wait for input.
while True:
msg_prefix = '{}-{}'.format(str_numbers[0], str_numbers[-1]) \
if len(str_numbers) > 1 else '{}'.format(str_numbers[0])
            str_number = input('Enter a number [{}]: '.format(msg_prefix))
if str_number in str_numbers:
break
return selectable_map[int(str_number)]
|
'''
Created on 21 Dec 2014
@author: chris
'''
def main():
year_0 = '2014'
results = get_results(year_0)
ranks = []
scores = []
for result in results:
# print result[0]
scores.append(result[1]*3 + result[2]*2 + result[3]*1)
# print result[4]
ranks.append(result[4])
import numpy as np
i_ranks = np.argsort(ranks)
ranks = np.array(ranks)[i_ranks]
scores = np.array(scores)[i_ranks]
for rank, score in zip(ranks,scores):
        print(rank, score)
def get_results(year_0):
from oldschoolsql import login
cur, mysql_cn = login()
search_str = ' SELECT countries.name, results.golds, results.silvers, results.bronzes, results.rank ' +\
' FROM dates ' +\
' JOIN results ON results.year_id = dates.id ' +\
' JOIN countries ON results.country_id = countries.id ' +\
' WHERE year = ' + year_0
cur.execute(search_str)
# print dir(cur)
results = cur.fetchall()
mysql_cn.commit()
mysql_cn.close()
return results
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
##
"""Dialog for listing payment categories"""
import gtk
from kiwi.ui.gadgets import render_pixbuf
from kiwi.ui.objectlist import Column
from stoqlib.domain.workorder import WorkOrder, WorkOrderCategory
from stoqlib.gui.base.lists import ModelListDialog, ModelListSlave
from stoqlib.gui.editors.workordercategoryeditor import WorkOrderCategoryEditor
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class WorkOrderCategoryListSlave(ModelListSlave):
model_type = WorkOrderCategory
editor_class = WorkOrderCategoryEditor
columns = [
Column('name', title=_('Category'), data_type=str,
expand=True, sorted=True),
Column('color', title=_('Color'), data_type=gtk.gdk.Pixbuf,
format_func=render_pixbuf),
Column('color', data_type=str, column='color')
]
def delete_model(self, model, store):
for workorder in store.find(WorkOrder, category=model):
workorder.category = None
super(WorkOrderCategoryListSlave, self).delete_model(model, store)
class WorkOrderCategoryDialog(ModelListDialog):
list_slave_class = WorkOrderCategoryListSlave
size = (620, 300)
title = _('Work order categories')
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
VTK_VARY_RADIUS_BY_VECTOR = 2
# create pipeline
#
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader = vtk.vtkGenericEnSightReader()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName(VTK_DATA_ROOT + "/Data/EnSight/RectGrid_ascii.case")
reader.Update()
toRectilinearGrid = vtk.vtkCastToConcrete()
toRectilinearGrid.SetInputData(reader.GetOutput().GetBlock(0))
toRectilinearGrid.Update()
plane = vtk.vtkRectilinearGridGeometryFilter()
plane.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
plane.SetExtent(0, 100, 0, 100, 15, 15)
tri = vtk.vtkTriangleFilter()
tri.SetInputConnection(plane.GetOutputPort())
warper = vtk.vtkWarpVector()
warper.SetInputConnection(tri.GetOutputPort())
warper.SetScaleFactor(0.05)
planeMapper = vtk.vtkDataSetMapper()
planeMapper.SetInputConnection(warper.GetOutputPort())
planeMapper.SetScalarRange(0.197813, 0.710419)
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
cutPlane = vtk.vtkPlane()
cutPlane.SetOrigin(reader.GetOutput().GetBlock(0).GetCenter())
cutPlane.SetNormal(1, 0, 0)
planeCut = vtk.vtkCutter()
planeCut.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
planeCut.SetCutFunction(cutPlane)
cutMapper = vtk.vtkDataSetMapper()
cutMapper.SetInputConnection(planeCut.GetOutputPort())
cutMapper.SetScalarRange(
reader.GetOutput().GetBlock(0).GetPointData().GetScalars().GetRange())
cutActor = vtk.vtkActor()
cutActor.SetMapper(cutMapper)
iso = vtk.vtkContourFilter()
iso.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
iso.SetValue(0, 0.7)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(iso.GetOutputPort())
normals.SetFeatureAngle(45)
isoMapper = vtk.vtkPolyDataMapper()
isoMapper.SetInputConnection(normals.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(GetRGBColor('bisque'))
isoActor.GetProperty().SetRepresentationToWireframe()
streamer = vtk.vtkStreamLine()
streamer.SetInputData(reader.GetOutput().GetBlock(0))
streamer.SetStartPosition(-1.2, -0.1, 1.3)
streamer.SetMaximumPropagationTime(500)
streamer.SetStepLength(0.05)
streamer.SetIntegrationStepLength(0.05)
streamer.SetIntegrationDirectionToIntegrateBothDirections()
streamTube = vtk.vtkTubeFilter()
streamTube.SetInputConnection(streamer.GetOutputPort())
streamTube.SetRadius(0.025)
streamTube.SetNumberOfSides(6)
streamTube.SetVaryRadius(VTK_VARY_RADIUS_BY_VECTOR)
mapStreamTube = vtk.vtkPolyDataMapper()
mapStreamTube.SetInputConnection(streamTube.GetOutputPort())
mapStreamTube.SetScalarRange(
reader.GetOutput().GetBlock(0).GetPointData().GetScalars().GetRange())
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(mapStreamTube)
streamTubeActor.GetProperty().BackfaceCullingOn()
outline = vtk.vtkOutlineFilter()
outline.SetInputData(toRectilinearGrid.GetRectilinearGridOutput())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(GetRGBColor('black'))
# Graphics stuff
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(planeActor)
ren1.AddActor(cutActor)
ren1.AddActor(isoActor)
ren1.AddActor(streamTubeActor)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(400, 400)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.76213, 10.712)
cam1.SetFocalPoint(-0.0842503, -0.136905, 0.610234)
cam1.SetPosition(2.53813, 2.2678, -5.22172)
cam1.SetViewUp(-0.241047, 0.930635, 0.275343)
reader.SetDefaultExecutivePrototype(None)
iren.Initialize()
# render the image
#
#iren.Start()
|
# -*- coding: utf-8 -*-
# Standard Library
import uuid
# Third Party Stuff
from rest_framework import permissions
# Junction Stuff
from junction.conferences.permissions import is_reviewer
from junction.devices.models import Device
def get_authorization_header(request):
auth = request.META.get('HTTP_AUTHORIZATION')
return auth
class CanSubmitFeedBack(permissions.BasePermission):
def has_permission(self, request, view):
token = get_authorization_header(request)
if token:
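            # The header's last whitespace-separated token is expected to be
            # the device UUID, e.g. 'Token <device-uuid>' (the scheme name is
            # illustrative; only the trailing UUID is used below).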
device_uuid = token.split()[-1]
view.device_uuid = uuid.UUID(device_uuid)
if device_uuid:
return Device.objects.filter(uuid=view.device_uuid).exists()
return False
return False
def can_view_feedback(user, schedule_item):
"""Given a schedule item object, say a requesting user can view the
feedback.
"""
if user.is_superuser:
return True
session = schedule_item.session
res = is_reviewer(user=user, conference=schedule_item.conference)
return session and (session.author == user or res)
|
"""
Django settings for picha project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'social.apps.django_app.default',
'api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'picha.urls'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'static/templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
WSGI_APPLICATION = 'picha.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {}
# Parse database configuration from $DATABASE_URL
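# dj_database_url.config() reads the DATABASE_URL environment variable, e.g.
# postgres://USER:PASSWORD@HOST:PORT/NAME (placeholder values shown).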
if os.getenv('TRAVIS_BUILD', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bucketlist_db',
'USER': 'travis',
'PASSWORD': '',
'HOST': '127.0.0.1',
}
}
else:
DATABASES['default'] = dj_database_url.config()
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
static_route = os.path.join(BASE_DIR, "static")
MEDIA_ROOT = os.path.join(static_route, 'media')
MEDIA_URL = '/static/media/'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'social.backends.twitter.TwitterOAuth',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FACEBOOK_SECRET')
# Use nose to run all tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Tell nose to measure coverage on the 'foo' and 'bar' apps
NOSE_ARGS = [
'--with-coverage',
]
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates an AndroidManifest.xml for an APK split.
Given the manifest file for the main APK, generates an AndroidManifest.xml with
the values required for a split APK (package, versionCode, etc.).
"""
import lxml.etree
import optparse
from util import build_utils
MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="%(package)s"
split="%(split)s">
<application android:hasCode="%(has_code)s">
</application>
</manifest>
"""
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--main-manifest', help='The main manifest of the app')
parser.add_option('--out-manifest', help='The output manifest')
parser.add_option('--split', help='The name of the split')
parser.add_option(
'--has-code',
action='store_true',
default=False,
help='Whether the split will contain a .dex file')
(options, args) = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('main_manifest', 'out_manifest', 'split')
build_utils.CheckOptions(options, parser, required=required_options)
return options
def Build(main_manifest, split, has_code):
"""Builds a split manifest based on the manifest of the main APK.
Args:
main_manifest: the XML manifest of the main APK as a string
split: the name of the split as a string
has_code: whether this split APK will contain .dex files
Returns:
The XML split manifest as a string
"""
doc = lxml.etree.fromstring(main_manifest)
package = doc.xpath('/manifest/@package')[0]
return MANIFEST_TEMPLATE % {
'package': package,
'split': split.replace('-', '_'),
'has_code': str(has_code).lower()
}
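# Illustrative example (not part of the build flow): for a main manifest whose
# root element declares package="com.example.app", Build(manifest_xml,
# 'config-split', has_code=False) fills the template above with
# package="com.example.app", split="config_split" and android:hasCode="false".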
def main():
options = ParseArgs()
  main_manifest = open(options.main_manifest).read()
split_manifest = Build(
main_manifest,
options.split,
options.has_code)
  with open(options.out_manifest, 'w') as f:
f.write(split_manifest)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
[main_manifest] + build_utils.GetPythonDependencies())
if __name__ == '__main__':
main()
|
# Copyright (c) 2011-2012 Florian Mounier
# Copyright (c) 2012-2014 roger
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sebastian Kricner
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import libqtile.hook
from libqtile.command import lazy
from libqtile.config import Group, Key, Match, Rule
from libqtile.log_utils import logger
def simple_key_binder(mod, keynames=None):
"""Bind keys to mod+group position or to the keys specified as second argument"""
def func(dgroup):
# unbind all
for key in dgroup.keys[:]:
dgroup.qtile.ungrab_key(key)
dgroup.keys.remove(key)
if keynames:
keys = keynames
else:
# keys 1 to 9 and 0
keys = list(map(str, list(range(1, 10)) + [0]))
# bind all keys
for keyname, group in zip(keys, dgroup.qtile.groups):
name = group.name
key = Key([mod], keyname, lazy.group[name].toscreen())
key_s = Key([mod, "shift"], keyname, lazy.window.togroup(name))
key_c = Key(
[mod, "control"],
keyname,
lazy.group.switch_groups(name)
)
dgroup.keys.append(key)
dgroup.keys.append(key_s)
dgroup.keys.append(key_c)
dgroup.qtile.grab_key(key)
dgroup.qtile.grab_key(key_s)
dgroup.qtile.grab_key(key_c)
return func
class DGroups:
"""Dynamic Groups"""
def __init__(self, qtile, dgroups, key_binder=None, delay=1):
self.qtile = qtile
self.groups = dgroups
self.groups_map = {}
self.rules = []
self.rules_map = {}
self.last_rule_id = 0
for rule in getattr(qtile.config, 'dgroups_app_rules', []):
self.add_rule(rule)
self.keys = []
self.key_binder = key_binder
self._setup_hooks()
self._setup_groups()
self.delay = delay
self.timeout = {}
def add_rule(self, rule, last=True):
rule_id = self.last_rule_id
self.rules_map[rule_id] = rule
if last:
self.rules.append(rule)
else:
self.rules.insert(0, rule)
self.last_rule_id += 1
return rule_id
def remove_rule(self, rule_id):
rule = self.rules_map.get(rule_id)
if rule:
self.rules.remove(rule)
del self.rules_map[rule_id]
else:
logger.warn('Rule "%s" not found', rule_id)
def add_dgroup(self, group, start=False):
self.groups_map[group.name] = group
rules = [Rule(m, group=group.name) for m in group.matches]
self.rules.extend(rules)
if start:
self.qtile.add_group(group.name, group.layout, group.layouts, group.label)
def _setup_groups(self):
for group in self.groups:
self.add_dgroup(group, group.init)
if group.spawn and not self.qtile.no_spawn:
if isinstance(group.spawn, str):
spawns = [group.spawn]
else:
spawns = group.spawn
for spawn in spawns:
pid = self.qtile.cmd_spawn(spawn)
self.add_rule(Rule(Match(net_wm_pid=[pid]), group.name))
def _setup_hooks(self):
libqtile.hook.subscribe.addgroup(self._addgroup)
libqtile.hook.subscribe.client_new(self._add)
libqtile.hook.subscribe.client_killed(self._del)
if self.key_binder:
libqtile.hook.subscribe.setgroup(
lambda: self.key_binder(self)
)
libqtile.hook.subscribe.changegroup(
lambda: self.key_binder(self)
)
def _addgroup(self, qtile, group_name):
if group_name not in self.groups_map:
self.add_dgroup(Group(group_name, persist=False))
def _add(self, client):
if client in self.timeout:
logger.info('Remove dgroup source')
self.timeout.pop(client).cancel()
# ignore static windows
if client.defunct:
return
# ignore windows whose groups is already set (e.g. from another hook or
# when it was set on state restore)
if client.group is not None:
return
group_set = False
intrusive = False
for rule in self.rules:
# Matching Rules
if rule.matches(client):
if rule.group:
if rule.group in self.groups_map:
layout = self.groups_map[rule.group].layout
layouts = self.groups_map[rule.group].layouts
label = self.groups_map[rule.group].label
else:
layout = None
layouts = None
label = None
group_added = self.qtile.add_group(rule.group, layout, layouts, label)
client.togroup(rule.group)
group_set = True
group_obj = self.qtile.groups_map[rule.group]
group = self.groups_map.get(rule.group)
if group and group_added:
for k, v in list(group.layout_opts.items()):
if isinstance(v, collections.Callable):
v(group_obj.layout)
else:
setattr(group_obj.layout, k, v)
affinity = group.screen_affinity
if affinity and len(self.qtile.screens) > affinity:
self.qtile.screens[affinity].set_group(group_obj)
if rule.float:
client.enablefloating()
if rule.intrusive:
intrusive = rule.intrusive
if rule.break_on_match:
break
# If app doesn't have a group
if not group_set:
current_group = self.qtile.current_group.name
if current_group in self.groups_map and \
self.groups_map[current_group].exclusive and \
not intrusive:
wm_class = client.window.get_wm_class()
if wm_class:
if len(wm_class) > 1:
wm_class = wm_class[1]
else:
wm_class = wm_class[0]
group_name = wm_class
else:
group_name = client.name or 'Unnamed'
self.add_dgroup(Group(group_name, persist=False), start=True)
client.togroup(group_name)
self.sort_groups()
def sort_groups(self):
grps = self.qtile.groups
sorted_grps = sorted(grps, key=lambda g: self.groups_map[g.name].position)
if grps != sorted_grps:
self.qtile.groups = sorted_grps
libqtile.hook.fire("changegroup")
def _del(self, client):
group = client.group
def delete_client():
# Delete group if empty and don't persist
if group and group.name in self.groups_map and \
not self.groups_map[group.name].persist and \
len(group.windows) <= 0:
self.qtile.delete_group(group.name)
self.sort_groups()
del self.timeout[client]
# Wait the delay until really delete the group
logger.info('Add dgroup timer with delay {}s'.format(self.delay))
self.timeout[client] = self.qtile.call_later(
self.delay, delete_client
)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'ProductItem.special_offer'
db.alter_column(u'djangocms_product_productitem', 'special_offer', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'ProductItem.special_offer'
raise RuntimeError("Cannot reverse this migration. 'ProductItem.special_offer' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'ProductItem.special_offer'
db.alter_column(u'djangocms_product_productitem', 'special_offer', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'djangocms_product.productcategory': {
'Meta': {'object_name': 'ProductCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'djangocms_product.productimage': {
'Meta': {'ordering': "['ordering']", 'object_name': 'ProductImage'},
'alt': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'image_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'}),
'image_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {}),
'product_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['djangocms_product.ProductItem']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'})
},
u'djangocms_product.productitem': {
'Meta': {'ordering': "('-changed_at',)", 'object_name': 'ProductItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'document': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '20', 'decimal_places': '2'}),
'product_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djangocms_product.ProductCategory']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'special_offer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'djangocms_product.productteaser': {
'Meta': {'object_name': 'ProductTeaser', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'ordering': ('django.db.models.fields.CharField', [], {'default': "'past_desc'", 'max_length': '20'}),
'product_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['djangocms_product.ProductCategory']", 'symmetrical': 'False'}),
'target_page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['djangocms_product']
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cPickle as pickle
import time
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.objects import aggregate as aggregate_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
from nova import utils
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
LOG = logging.getLogger(__name__)
xenapi_session_opts = [
cfg.IntOpt('login_timeout',
default=10,
deprecated_name='xenapi_login_timeout',
deprecated_group='DEFAULT',
help='Timeout in seconds for XenAPI login.'),
cfg.IntOpt('connection_concurrent',
default=5,
deprecated_name='xenapi_connection_concurrent',
deprecated_group='DEFAULT',
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_session_opts, 'xenserver')
CONF.import_opt('host', 'nova.netconf')
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
    # MINOR VERSION: Compatible changes, new plugins, etc
PLUGIN_REQUIRED_VERSION = '1.0'
def __init__(self, url, user, pw):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.host_ref = self._get_host_ref()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._verify_plugin_version()
def _verify_plugin_version(self):
requested_version = self.PLUGIN_REQUIRED_VERSION
current_version = self.call_plugin_serialized(
'nova_plugin_version', 'get_version')
if not versionutils.is_compatible(requested_version, current_version):
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': requested_version, 'got': current_version})
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure as e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenserver.connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = aggregate_obj.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = utils.convert_version_to_tuple(product_version_str)
return product_version, product_brand
def _get_software_version(self):
return self.call_xenapi('host.get_software_version', self.host_ref)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def _get_host_ref(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
self.host_ref, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
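        # Exponential back-off between attempts: 0.5s, 1s, 2s, ... capped at
        # 15s (see the min(2 * sleep_time, 15) below).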
for attempt in xrange(1, attempts + 1):
LOG.info(_('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d'),
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts})
try:
if attempt > 1:
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
if callback:
callback(kwargs)
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
except self.XenAPI.Failure as exc:
if self._is_retryable_exception(exc):
LOG.warn(_('%(plugin)s.%(fn)s failed. Retrying call.')
% {'plugin': plugin, 'fn': fn})
else:
raise
raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug(_("RetryableError, so retrying upload_vhd"),
exc_info=True)
return True
elif "signal" in method:
LOG.debug(_("Error due to a signal, retrying upload_vhd"),
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.
params = eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug(_("Got exception: %s"), exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
for ref in self.call_xenapi('%s.get_all' % record_type):
rec = self.get_rec(record_type, ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_record call
if rec:
yield ref, rec
|
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.array.trajectory import TrajectoryData
from aiida.orm.data.array import ArrayData
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
import numpy as np
def get_FORCE_CONSTANTS_txt(force_constants):
force_constants = force_constants.get_array('force_constants')
fc_shape = force_constants.shape
fc_txt = "%4d\n" % (fc_shape[0])
for i in range(fc_shape[0]):
for j in range(fc_shape[1]):
fc_txt += "%4d%4d\n" % (i+1, j+1)
for vec in force_constants[i][j]:
fc_txt +=("%22.15f"*3 + "\n") % tuple(vec)
return fc_txt
def get_trajectory_txt(trajectory):
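    # Convert the first cell of the trajectory into LAMMPS triclinic box
    # parameters: a, b, c and the cell angles give the tilt factors xy, xz, yz
    # and the bounds (xlo/xhi etc.) written in the ITEM: BOX BOUNDS section below.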
cell = trajectory.get_cells()[0]
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
alpha = np.arccos(np.dot(cell[1], cell[2])/(c*b))
gamma = np.arccos(np.dot(cell[1], cell[0])/(a*b))
beta = np.arccos(np.dot(cell[2], cell[0])/(a*c))
xhi = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
yhi = np.sqrt(pow(b,2)- pow(xy,2))
yz = (b*c*np.cos(alpha)-xy * xz)/yhi
zhi = np.sqrt(pow(c,2)-pow(xz,2)-pow(yz,2))
    xhi = xhi + max(0.0, xy, xz, xy+xz)
    yhi = yhi + max(0.0, yz)
xlo_bound = np.min([0.0, xy, xz, xy+xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
ylo_bound = np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = 0
zhi_bound = zhi
ind = trajectory.get_array('steps')
lammps_data_file = ''
for i, position_step in enumerate(trajectory.get_positions()):
lammps_data_file += 'ITEM: TIMESTEP\n'
lammps_data_file += '{}\n'.format(ind[i])
lammps_data_file += 'ITEM: NUMBER OF ATOMS\n'
lammps_data_file += '{}\n'.format(len(position_step))
lammps_data_file += 'ITEM: BOX BOUNDS xy xz yz pp pp pp\n'
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(xlo_bound, xhi_bound, xy)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(ylo_bound, yhi_bound, xz)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(zlo_bound, zhi_bound, yz)
lammps_data_file += ('ITEM: ATOMS x y z\n')
for position in position_step:
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(*position)
return lammps_data_file
def structure_to_poscar(structure):
types = [site.kind_name for site in structure.sites]
atom_type_unique = np.unique(types, return_index=True)
sort_index = np.argsort(atom_type_unique[1])
elements = np.array(atom_type_unique[0])[sort_index]
elements_count= np.diff(np.append(np.array(atom_type_unique[1])[sort_index], [len(types)]))
poscar = '# VASP POSCAR generated using aiida workflow '
poscar += '\n1.0\n'
cell = structure.cell
for row in cell:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*row)
poscar += ' '.join([str(e) for e in elements]) + '\n'
poscar += ' '.join([str(e) for e in elements_count]) + '\n'
poscar += 'Cartesian\n'
for site in structure.sites:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*site.position)
return poscar
def parameters_to_input_file(parameters_object):
parameters = parameters_object.get_dict()
input_file = ('STRUCTURE FILE POSCAR\nPOSCAR\n\n')
input_file += ('FORCE CONSTANTS\nFORCE_CONSTANTS\n\n')
input_file += ('PRIMITIVE MATRIX\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[0])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[1])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[2])
input_file += ('\n')
input_file += ('SUPERCELL MATRIX PHONOPY\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[0])
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[1])
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[2])
input_file += ('\n')
return input_file
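# Illustrative shape of the expected parameters node (hypothetical values):
# ParameterData(dict={'primitive': [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
#                     'supercell': [[2, 0, 0], [0, 2, 0], [0, 0, 2]],
#                     'temperature': 300})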
class DynaphopyCalculation(JobCalculation):
"""
A basic plugin for calculating force constants using Phonopy.
Requirement: the node should be able to import phonopy
"""
def _init_internal_params(self):
super(DynaphopyCalculation, self)._init_internal_params()
self._INPUT_FILE_NAME = 'input_dynaphopy'
self._INPUT_TRAJECTORY = 'trajectory'
self._INPUT_CELL = 'POSCAR'
self._INPUT_FORCE_CONSTANTS = 'FORCE_CONSTANTS'
self._OUTPUT_FORCE_CONSTANTS = 'FORCE_CONSTANTS_OUT'
self._OUTPUT_FILE_NAME = 'OUTPUT'
self._OUTPUT_QUASIPARTICLES = 'quasiparticles_data.yaml'
self._default_parser = 'dynaphopy'
@classproperty
def _use_methods(cls):
"""
Additional use_* methods for the namelists class.
"""
retdict = JobCalculation._use_methods
retdict.update({
"parameters": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the dynaphopy input "
"for the namelists"),
},
"trajectory": {
'valid_types': TrajectoryData,
'additional_parameter': None,
'linkname': 'trajectory',
'docstring': ("Use a node that specifies the trajectory data "
"for the namelists"),
},
"force_constants": {
'valid_types': ArrayData,
'additional_parameter': None,
'linkname': 'force_constants',
'docstring': ("Use a node that specifies the force_constants "
"for the namelists"),
},
"structure": {
'valid_types': StructureData,
'additional_parameter': None,
'linkname': 'structure',
'docstring': "Use a node for the structure",
},
})
return retdict
def _prepare_for_submission(self,tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
        :param tempfolder: an aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
parameters_data = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
            parameters_data = None  # fall through to the type check below
#raise InputValidationError("No parameters specified for this "
# "calculation")
if not isinstance(parameters_data, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
structure = inputdict.pop(self.get_linkname('structure'))
except KeyError:
raise InputValidationError("no structure is specified for this calculation")
try:
trajectory = inputdict.pop(self.get_linkname('trajectory'))
except KeyError:
raise InputValidationError("trajectory is specified for this calculation")
try:
force_constants = inputdict.pop(self.get_linkname('force_constants'))
except KeyError:
raise InputValidationError("no force_constants is specified for this calculation")
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("no code is specified for this calculation")
time_step = trajectory.get_times()[1]-trajectory.get_times()[0]
##############################
# END OF INITIAL INPUT CHECK #
##############################
# =================== prepare the python input files =====================
cell_txt = structure_to_poscar(structure)
input_txt = parameters_to_input_file(parameters_data)
force_constants_txt = get_FORCE_CONSTANTS_txt(force_constants)
trajectory_txt = get_trajectory_txt(trajectory)
# =========================== dump to file =============================
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename, 'w') as infile:
infile.write(input_txt)
cell_filename = tempfolder.get_abs_path(self._INPUT_CELL)
with open(cell_filename, 'w') as infile:
infile.write(cell_txt)
force_constants_filename = tempfolder.get_abs_path(self._INPUT_FORCE_CONSTANTS)
with open(force_constants_filename, 'w') as infile:
infile.write(force_constants_txt)
trajectory_filename = tempfolder.get_abs_path(self._INPUT_TRAJECTORY)
with open(trajectory_filename, 'w') as infile:
infile.write(trajectory_txt)
# ============================ calcinfo ================================
local_copy_list = []
remote_copy_list = []
# additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
# Empty command line by default
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
# Retrieve files
calcinfo.retrieve_list = [self._OUTPUT_FILE_NAME,
self._OUTPUT_FORCE_CONSTANTS,
self._OUTPUT_QUASIPARTICLES]
codeinfo = CodeInfo()
codeinfo.cmdline_params = [self._INPUT_FILE_NAME, self._INPUT_TRAJECTORY,
'-ts', '{}'.format(time_step), '--silent',
'-sfc', self._OUTPUT_FORCE_CONSTANTS, '-thm', # '--resolution 0.01',
'-psm','2', '--normalize_dos', '-sdata']
if 'temperature' in parameters_data.get_dict():
codeinfo.cmdline_params.append('--temperature')
codeinfo.cmdline_params.append('{}'.format(parameters_data.dict.temperature))
if 'md_commensurate' in parameters_data.get_dict():
if parameters_data.dict.md_commensurate:
codeinfo.cmdline_params.append('--MD_commensurate')
codeinfo.stdout_name = self._OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
codeinfo.withmpi = False
calcinfo.codes_info = [codeinfo]
return calcinfo
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import time
import re
import datetime
import warnings
from decimal import Decimal
from collections import OrderedDict, defaultdict
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy import _erfa as erfa
from . import utils
from .utils import day_frac, quantity_day_frac, two_sum, two_product
__all__ = ['TimeFormat', 'TimeJD', 'TimeMJD', 'TimeFromEpoch', 'TimeUnix',
'TimeCxcSec', 'TimeGPS', 'TimeDecimalYear',
'TimePlotDate', 'TimeUnique', 'TimeDatetime', 'TimeString',
'TimeISO', 'TimeISOT', 'TimeFITS', 'TimeYearDayTime',
'TimeEpochDate', 'TimeBesselianEpoch', 'TimeJulianEpoch',
'TimeDeltaFormat', 'TimeDeltaSec', 'TimeDeltaJD',
'TimeEpochDateString', 'TimeBesselianEpochString',
'TimeJulianEpochString', 'TIME_FORMATS', 'TIME_DELTA_FORMATS',
'TimezoneInfo', 'TimeDeltaDatetime', 'TimeDatetime64', 'TimeYMDHMS',
'TimeNumeric', 'TimeDeltaNumeric']
__doctest_skip__ = ['TimePlotDate']
# These both get filled in at end after TimeFormat subclasses defined.
# Use an OrderedDict to fix the order in which formats are tried.
# This ensures, e.g., that 'isot' gets tried before 'fits'.
TIME_FORMATS = OrderedDict()
TIME_DELTA_FORMATS = OrderedDict()
# Translations between deprecated FITS timescales defined by
# Rots et al. 2015, A&A 574:A36, and timescales used here.
FITS_DEPRECATED_SCALES = {'TDT': 'tt', 'ET': 'tt',
'GMT': 'utc', 'UT': 'utc', 'IAT': 'tai'}
def _regexify_subfmts(subfmts):
"""
Iterate through each of the sub-formats and try substituting simple
regular expressions for the strptime codes for year, month, day-of-month,
hour, minute, second. If no % characters remain then turn the final string
into a compiled regex. This assumes time formats do not have a % in them.
This is done both to speed up parsing of strings and to allow mixed formats
where strptime does not quite work well enough.
"""
new_subfmts = []
for subfmt_tuple in subfmts:
subfmt_in = subfmt_tuple[1]
if isinstance(subfmt_in, str):
for strptime_code, regex in (('%Y', r'(?P<year>\d\d\d\d)'),
('%m', r'(?P<mon>\d{1,2})'),
('%d', r'(?P<mday>\d{1,2})'),
('%H', r'(?P<hour>\d{1,2})'),
('%M', r'(?P<min>\d{1,2})'),
('%S', r'(?P<sec>\d{1,2})')):
subfmt_in = subfmt_in.replace(strptime_code, regex)
if '%' not in subfmt_in:
subfmt_tuple = (subfmt_tuple[0],
re.compile(subfmt_in + '$'),
subfmt_tuple[2])
new_subfmts.append(subfmt_tuple)
return tuple(new_subfmts)
class TimeFormatMeta(type):
"""
Metaclass that adds `TimeFormat` and `TimeDeltaFormat` to the
`TIME_FORMATS` and `TIME_DELTA_FORMATS` registries, respectively.
"""
_registry = TIME_FORMATS
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Register time formats that have a name, but leave out astropy_time since
# it is not a user-accessible format and is only used for initialization into
# a different format.
if 'name' in members and cls.name != 'astropy_time':
# FIXME: check here that we're not introducing a collision with
# an existing method or attribute; problem is it could be either
# astropy.time.Time or astropy.time.TimeDelta, and at the point
# where this is run neither of those classes have necessarily been
# constructed yet.
if 'value' in members and not hasattr(members['value'], "fget"):
raise ValueError("If defined, 'value' must be a property")
mcls._registry[cls.name] = cls
if 'subfmts' in members:
cls.subfmts = _regexify_subfmts(members['subfmts'])
return cls
class TimeFormat(metaclass=TimeFormatMeta):
"""
Base class for time representations.
Parameters
----------
val1 : numpy ndarray, list, number, str, or bytes
Values to initialize the time or times. Bytes are decoded as ascii.
val2 : numpy ndarray, list, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
scale : str
Time scale of input value(s)
precision : int
Precision for seconds as floating point
in_subfmt : str
Select subformat for inputting string times
out_subfmt : str
Select subformat for outputting string times
from_jd : bool
If true then val1, val2 are jd1, jd2
"""
_default_scale = 'utc' # As of astropy 0.4
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale # validation of scale done later with _check_scale
self.precision = precision
self.in_subfmt = in_subfmt
self.out_subfmt = out_subfmt
self._jd1, self._jd2 = None, None
if from_jd:
self.jd1 = val1
self.jd2 = val2
else:
val1, val2 = self._check_val_type(val1, val2)
self.set_jds(val1, val2)
@property
def jd1(self):
return self._jd1
@jd1.setter
def jd1(self, jd1):
self._jd1 = _validate_jd_for_storage(jd1)
if self._jd2 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
@property
def jd2(self):
return self._jd2
@jd2.setter
def jd2(self, jd2):
self._jd2 = _validate_jd_for_storage(jd2)
if self._jd1 is not None:
self._jd1, self._jd2 = _broadcast_writeable(self._jd1, self._jd2)
def __len__(self):
return len(self.jd1)
@property
def scale(self):
"""Time scale"""
self._scale = self._check_scale(self._scale)
return self._scale
@scale.setter
def scale(self, val):
self._scale = val
def mask_if_needed(self, value):
if self.masked:
value = np.ma.array(value, mask=self.mask, copy=False)
return value
@property
def mask(self):
if 'mask' not in self.cache:
self.cache['mask'] = np.isnan(self.jd2)
if self.cache['mask'].shape:
self.cache['mask'].flags.writeable = False
return self.cache['mask']
@property
def masked(self):
if 'masked' not in self.cache:
self.cache['masked'] = bool(np.any(self.mask))
return self.cache['masked']
@property
def jd2_filled(self):
return np.nan_to_num(self.jd2) if self.masked else self.jd2
@lazyproperty
def cache(self):
"""
Return the cache associated with this instance.
"""
return defaultdict(dict)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
# val1 cannot contain nan, but val2 can contain nan
ok1 = (val1.dtype.kind == 'f' and val1.dtype.itemsize >= 8
and np.all(np.isfinite(val1)) or val1.size == 0)
ok2 = val2 is None or (
val2.dtype.kind == 'f' and val2.dtype.itemsize >= 8
and not np.any(np.isinf(val2))) or val2.size == 0
if not (ok1 and ok2):
raise TypeError('Input values for {} class must be finite doubles'
.format(self.name))
if getattr(val1, 'unit', None) is not None:
# Convert any quantity-likes to days first, attempting to be
# careful with the conversion, so that, e.g., large numbers of
            # seconds get converted without losing precision because
# 1/86400 is not exactly representable as a float.
val1 = u.Quantity(val1, copy=False)
if val2 is not None:
val2 = u.Quantity(val2, copy=False)
try:
val1, val2 = quantity_day_frac(val1, val2)
except u.UnitsError:
raise u.UnitConversionError(
"only quantities with time units can be "
"used to instantiate Time instances.")
# We now have days, but the format may expect another unit.
# On purpose, multiply with 1./day_unit because typically it is
# 1./erfa.DAYSEC, and inverting it recovers the integer.
# (This conversion will get undone in format's set_jds, hence
# there may be room for optimizing this.)
factor = 1. / getattr(self, 'unit', 1.)
if factor != 1.:
val1, carry = two_product(val1, factor)
carry += val2 * factor
val1, val2 = two_sum(val1, carry)
elif getattr(val2, 'unit', None) is not None:
raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
def asarray_or_scalar(val):
"""
Remove ndarray subclasses since for jd1/jd2 we want a pure ndarray
or a Python or numpy scalar.
"""
return np.asarray(val) if isinstance(val, np.ndarray) else val
return asarray_or_scalar(val1), asarray_or_scalar(val2)
def _check_scale(self, scale):
"""
Return a validated scale value.
If there is a class attribute 'scale' then that defines the default /
        required time scale for this format. In this case, if a scale value was
        provided it needs to match the class default; otherwise the class default
        is returned.
Otherwise just make sure that scale is in the allowed list of
scales. Provide a different error message if `None` (no value) was
supplied.
"""
if scale is None:
scale = self._default_scale
if scale not in TIME_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_SCALES))
return scale
def set_jds(self, val1, val2):
"""
Set internal jd1 and jd2 from val1 and val2. Must be provided
by derived classes.
"""
raise NotImplementedError
def to_value(self, parent=None):
"""
Return time representation from internal jd1 and jd2. This is
the base method that ignores ``parent`` and requires that
subclasses implement the ``value`` property. Subclasses that
require ``parent`` or have other optional args for ``to_value``
should compute and return the value directly.
"""
return self.mask_if_needed(self.value)
@property
def value(self):
raise NotImplementedError
def _select_subfmts(self, pattern):
"""
Return a list of subformats where name matches ``pattern`` using
fnmatch.
"""
fnmatchcase = fnmatch.fnmatchcase
subfmts = [x for x in self.subfmts if fnmatchcase(x[0], pattern)]
if len(subfmts) == 0:
raise ValueError(f'No subformats match {pattern}')
return subfmts
class TimeNumeric(TimeFormat):
subfmts = (
('float', np.float64, None, np.add),
('long', np.longdouble, utils.longdouble_to_twoval,
utils.twoval_to_longdouble),
('decimal', np.object_, utils.decimal_to_twoval,
utils.twoval_to_decimal),
('str', np.str_, utils.decimal_to_twoval, utils.twoval_to_string),
('bytes', np.bytes_, utils.bytes_to_twoval, utils.twoval_to_bytes),
)
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if val1.dtype.kind == 'f':
val1, val2 = super()._check_val_type(val1, val2)
elif (val2 is not None
or not (val1.dtype.kind in 'US'
or (val1.dtype.kind == 'O'
and all(isinstance(v, Decimal) for v in val1.flat)))):
raise TypeError(
'for {} class, input should be doubles, string, or Decimal, '
'and second values are only allowed for doubles.'
.format(self.name))
subfmts = self._select_subfmts(self.in_subfmt)
for subfmt, dtype, convert, _ in subfmts:
if np.issubdtype(val1.dtype, dtype):
break
else:
raise ValueError('input type not among selected sub-formats.')
if convert is not None:
try:
val1, val2 = convert(val1, val2)
except Exception:
raise TypeError(
'for {} class, input should be (long) doubles, string, '
'or Decimal, and second values are only allowed for '
'(long) doubles.'.format(self.name))
return val1, val2
def to_value(self, jd1=None, jd2=None, parent=None, out_subfmt=None):
"""
Return time representation from internal jd1 and jd2.
Subclasses that require ``parent`` or to adjust the jds should
override this method.
"""
# TODO: do this in metaclass.
if self.__class__.value.fget is not self.__class__.to_value:
return self.value
if jd1 is None:
jd1 = self.jd1
if jd2 is None:
jd2 = self.jd2
if out_subfmt is None:
out_subfmt = self.out_subfmt
subfmt = self._select_subfmts(out_subfmt)[0]
kwargs = {}
if subfmt[0] in ('str', 'bytes'):
unit = getattr(self, 'unit', 1)
digits = int(np.ceil(np.log10(unit / np.finfo(float).eps)))
# TODO: allow a way to override the format.
kwargs['fmt'] = f'.{digits}f'
value = subfmt[3](jd1, jd2, **kwargs)
return self.mask_if_needed(value)
value = property(to_value)
class TimeJD(TimeNumeric):
"""
Julian Date time format.
This represents the number of days since the beginning of
the Julian Period.
For example, 2451544.5 in JD is midnight on January 1, 2000.
"""
name = 'jd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2)
class TimeMJD(TimeNumeric):
"""
Modified Julian Date time format.
This represents the number of days since midnight on November 17, 1858.
For example, 51544.0 in MJD is midnight on January 1, 2000.
"""
name = 'mjd'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
jd1, jd2 = day_frac(val1, val2)
jd1 += erfa.DJM0 # erfa.DJM0=2400000.5 (from erfam.h).
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd1 = self.jd1 - erfa.DJM0 # This cannot lose precision.
jd2 = self.jd2
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDecimalYear(TimeNumeric):
"""
Time as a decimal year, with integer values corresponding to midnight
of the first day of each year. For example 2000.5 corresponds to the
ISO time '2000-07-02 00:00:00'.
"""
name = 'decimalyear'
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
sum12, err12 = two_sum(val1, val2)
iy_start = np.trunc(sum12).astype(int)
extra, y_frac = two_sum(sum12, -iy_start)
y_frac += extra + err12
val = (val1 + val2).astype(np.double)
iy_start = np.trunc(val).astype(int)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(y_frac)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
t_frac = t_start + (t_end - t_start) * y_frac
self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
def to_value(self, **kwargs):
scale = self.scale.upper().encode('ascii')
iy_start, ims, ids, ihmsfs = erfa.d2dtf(scale, 0, # precision=0
self.jd1, self.jd2_filled)
imon = np.ones_like(iy_start)
iday = np.ones_like(iy_start)
ihr = np.zeros_like(iy_start)
imin = np.zeros_like(iy_start)
isec = np.zeros_like(self.jd1)
# Possible enhancement: use np.unique to only compute start, stop
# for unique values of iy_start.
scale = self.scale.upper().encode('ascii')
jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
ihr, imin, isec)
jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
ihr, imin, isec)
# Trying to be precise, but more than float64 not useful.
dt = (self.jd1 - jd1_start) + (self.jd2 - jd2_start)
dt_end = (jd1_end - jd1_start) + (jd2_end - jd2_start)
decimalyear = iy_start + dt / dt_end
return super().to_value(jd1=decimalyear, jd2=0., **kwargs)
value = property(to_value)
class TimeFromEpoch(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a floating point multiple of a unit time interval (e.g. seconds
or days).
"""
def __init__(self, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
self.scale = scale
# Initialize the reference epoch (a single time defined in subclasses)
epoch = Time(self.epoch_val, self.epoch_val2, scale=self.epoch_scale,
format=self.epoch_format)
self.epoch = epoch
# Now create the TimeFormat object as normal
super().__init__(val1, val2, scale, precision, in_subfmt, out_subfmt,
from_jd)
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
        For a TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
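        # Worked instance (added here for clarity; not in the original comment): for
        # TimeUnix, self.unit is 1/86400 day per second, so divisor = 1./self.unit is
        # exactly 86400.0. day_frac(val1, val2, divisor=86400.0) then splits the input
        # seconds into whole days plus an exact fractional remainder, instead of
        # multiplying by the inexact float 1/86400.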
day, frac = day_frac(val1, val2, divisor=1. / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(Time(jd1, jd2, scale=self.epoch_scale,
format='jd'), self.scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err)) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError('cannot compute value without parent Time object')
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError("Cannot convert from '{}' epoch scale '{}'"
"to specified scale '{}', got error:\n{}"
.format(self.name, self.epoch_scale,
self.scale, err)) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1. / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
class TimeUnix(TimeFromEpoch):
"""
Unix time: seconds from 1970-01-01 00:00:00 UTC.
For example, 946684800.0 in Unix time is midnight on January 1, 2000.
NOTE: this quantity is not exactly unix time and differs from the strict
POSIX definition by up to 1 second on days with a leap second. POSIX
unix time actually jumps backward by 1 second at midnight on leap second
days while this class value is monotonically increasing at 86400 seconds
per UTC day.
"""
name = 'unix'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1970-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'iso'
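# Illustrative usage (a sketch assuming the public astropy.time API; not part of this
# file): the docstring value above round-trips as expected:
#   >>> from astropy.time import Time
#   >>> Time(946684800.0, format='unix').iso
#   '2000-01-01 00:00:00.000'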
class TimeCxcSec(TimeFromEpoch):
"""
Chandra X-ray Center seconds from 1998-01-01 00:00:00 TT.
For example, 63072064.184 is midnight on January 1, 2000.
"""
name = 'cxcsec'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1998-01-01 00:00:00'
epoch_val2 = None
epoch_scale = 'tt'
epoch_format = 'iso'
class TimeGPS(TimeFromEpoch):
"""GPS time: seconds from 1980-01-06 00:00:00 UTC
For example, 630720013.0 is midnight on January 1, 2000.
Notes
=====
This implementation is strictly a representation of the number of seconds
(including leap seconds) since midnight UTC on 1980-01-06. GPS can also be
considered as a time scale which is ahead of TAI by a fixed offset
(to within about 100 nanoseconds).
For details, see https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer
"""
name = 'gps'
unit = 1.0 / erfa.DAYSEC # in days (1 day == 86400 seconds)
epoch_val = '1980-01-06 00:00:19'
# above epoch is the same as Time('1980-01-06 00:00:00', scale='utc').tai
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimePlotDate(TimeFromEpoch):
"""
Matplotlib `~matplotlib.pyplot.plot_date` input:
1 + number of days from 0001-01-01 00:00:00 UTC
This can be used directly in the matplotlib `~matplotlib.pyplot.plot_date`
function::
>>> import matplotlib.pyplot as plt
>>> jyear = np.linspace(2000, 2001, 20)
>>> t = Time(jyear, format='jyear', scale='utc')
>>> plt.plot_date(t.plot_date, jyear)
>>> plt.gcf().autofmt_xdate() # orient date labels at a slant
>>> plt.draw()
For example, 730120.0003703703 is midnight on January 1, 2000.
"""
# This corresponds to the zero reference time for matplotlib plot_date().
# Note that TAI and UTC are equivalent at the reference time.
name = 'plot_date'
unit = 1.0
epoch_val = 1721424.5 # Time('0001-01-01 00:00:00', scale='tai').jd - 1
epoch_val2 = None
epoch_scale = 'utc'
epoch_format = 'jd'
class TimeStardate(TimeFromEpoch):
"""
Stardate: date units from 2318-07-05 12:00:00 UTC.
For example, stardate 41153.7 is 00:52 on April 30, 2363.
See http://trekguide.com/Stardates.htm#TNG for calculations and reference points
"""
name = 'stardate'
unit = 0.397766856 # Stardate units per day
epoch_val = '2318-07-05 11:00:00' # Date and time of stardate 00000.00
epoch_val2 = None
epoch_scale = 'tai'
epoch_format = 'iso'
class TimeUnique(TimeFormat):
"""
Base class for time formats that can uniquely create a time object
without requiring an explicit format specifier. This class does
nothing but provide inheritance to identify a class as unique.
"""
class TimeAstropyTime(TimeUnique):
"""
Instantiate date from an Astropy Time object (or list thereof).
This is purely for instantiating from a Time object. The output
format is the same as the first time instance.
"""
name = 'astropy_time'
def __new__(cls, val1, val2, scale, precision,
in_subfmt, out_subfmt, from_jd=False):
"""
Use __new__ instead of __init__ to output a class instance that
is the same as the class of the first Time object in the list.
"""
val1_0 = val1.flat[0]
if not (isinstance(val1_0, Time) and all(type(val) is type(val1_0)
for val in val1.flat)):
raise TypeError('Input values for {} class must all be same '
'astropy Time type.'.format(cls.name))
if scale is None:
scale = val1_0.scale
if val1.shape:
vals = [getattr(val, scale)._time for val in val1]
jd1 = np.concatenate([np.atleast_1d(val.jd1) for val in vals])
jd2 = np.concatenate([np.atleast_1d(val.jd2) for val in vals])
else:
val = getattr(val1_0, scale)._time
jd1, jd2 = val.jd1, val.jd2
OutTimeFormat = val1_0._time.__class__
self = OutTimeFormat(jd1, jd2, scale, precision, in_subfmt, out_subfmt,
from_jd=True)
return self
class TimeDatetime(TimeUnique):
"""
Represent date as Python standard library `~datetime.datetime` object
Example::
>>> from astropy.time import Time
>>> from datetime import datetime
>>> t = Time(datetime(2000, 1, 2, 12, 0, 0), scale='utc')
>>> t.iso
'2000-01-02 12:00:00.000'
>>> t.tt.datetime
datetime.datetime(2000, 1, 2, 12, 1, 4, 184000)
"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.datetime) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
"""Convert datetime object contained in val1 to jd1, jd2"""
# Iterate through the datetime objects, getting year, month, etc.
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None] + 5*[np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
dt = val.item()
if dt.tzinfo is not None:
dt = (dt - dt.utcoffset()).replace(tzinfo=None)
iy[...] = dt.year
im[...] = dt.month
id[...] = dt.day
ihr[...] = dt.hour
imin[...] = dt.minute
dsec[...] = dt.second + dt.microsecond / 1e6
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, timezone=None, parent=None):
"""
Convert to (potentially timezone-aware) `~datetime.datetime` object.
If ``timezone`` is not ``None``, return a timezone-aware datetime
object.
Parameters
----------
timezone : {`~datetime.tzinfo`, None}, optional
If not `None`, return timezone-aware datetime.
Returns
-------
`~datetime.datetime`
If ``timezone`` is not ``None``, output will be timezone-aware.
"""
if timezone is not None:
if self._scale != 'utc':
raise ScaleValueError("scale is {}, must be 'utc' when timezone "
"is supplied.".format(self._scale))
# Rather than define a value property directly, we have a function,
# since we want to be able to pass in timezone information.
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 6, # 6 for microsec
self.jd1, self.jd2_filled)
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
iterator = np.nditer([iys, ims, ids, ihrs, imins, isecs, ifracs, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=7*[None] + [object])
for iy, im, id, ihr, imin, isec, ifracsec, out in iterator:
if isec >= 60:
raise ValueError('Time {} is within a leap second but datetime '
'does not support leap seconds'
.format((iy, im, id, ihr, imin, isec, ifracsec)))
if timezone is not None:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec,
tzinfo=TimezoneInfo()).astimezone(timezone)
else:
out[...] = datetime.datetime(iy, im, id, ihr, imin, isec, ifracsec)
return self.mask_if_needed(iterator.operands[-1])
value = property(to_value)
class TimeYMDHMS(TimeUnique):
"""
ymdhms: A Time format to represent Time as year, month, day, hour,
minute, second (thus the name ymdhms).
Acceptable inputs must have keys or column names in the "YMDHMS" set of
    ``year``, ``month``, ``day``, ``hour``, ``minute``, ``second``:
- Dict with keys in the YMDHMS set
- NumPy structured array, record array or astropy Table, or single row
of those types, with column names in the YMDHMS set
One can supply a subset of the YMDHMS values, for instance only 'year',
'month', and 'day'. Inputs have the following defaults::
'month': 1, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0
When the input is supplied as a ``dict`` then each value can be either a
scalar value or an array. The values will be broadcast to a common shape.
Example::
>>> from astropy.time import Time
>>> t = Time({'year': 2015, 'month': 2, 'day': 3,
... 'hour': 12, 'minute': 13, 'second': 14.567},
... scale='utc')
>>> t.iso
'2015-02-03 12:13:14.567'
>>> t.ymdhms.year
2015
"""
name = 'ymdhms'
def _check_val_type(self, val1, val2):
"""
This checks inputs for the YMDHMS format.
        It is a bit more complex than most format checkers because of the flexible
input that is allowed. Also, it actually coerces ``val1`` into an appropriate
dict of ndarrays that can be used easily by ``set_jds()``. This is useful
because it makes it easy to get default values in that routine.
Parameters
----------
val1 : ndarray or None
val2 : ndarray or None
Returns
-------
val1_as_dict, val2 : val1 as dict or None, val2 is always None
"""
if val2 is not None:
raise ValueError('val2 must be None for ymdhms format')
ymdhms = ['year', 'month', 'day', 'hour', 'minute', 'second']
if val1.dtype.names:
# Convert to a dict of ndarray
val1_as_dict = {name: val1[name] for name in val1.dtype.names}
elif val1.shape == (0,):
# Input was empty list [], so set to None and set_jds will handle this
return None, None
elif (val1.dtype.kind == 'O'
and val1.shape == ()
and isinstance(val1.item(), dict)):
# Code gets here for input as a dict. The dict input
# can be either scalar values or N-d arrays.
# Extract the item (which is a dict) and broadcast values to the
# same shape here.
names = val1.item().keys()
values = val1.item().values()
val1_as_dict = {name: value for name, value
in zip(names, np.broadcast_arrays(*values))}
else:
raise ValueError('input must be dict or table-like')
# Check that the key names now are good.
names = val1_as_dict.keys()
required_names = ymdhms[:len(names)]
def comma_repr(vals):
return ', '.join(repr(val) for val in vals)
bad_names = set(names) - set(ymdhms)
if bad_names:
raise ValueError(f'{comma_repr(bad_names)} not allowed as YMDHMS key name(s)')
if set(names) != set(required_names):
raise ValueError(f'for {len(names)} input key names '
f'you must supply {comma_repr(required_names)}')
return val1_as_dict, val2
def set_jds(self, val1, val2):
if val1 is None:
# Input was empty list []
jd1 = np.array([], dtype=np.float64)
jd2 = np.array([], dtype=np.float64)
else:
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
val1['year'],
val1.get('month', 1),
val1.get('day', 1),
val1.get('hour', 0),
val1.get('minute', 0),
val1.get('second', 0))
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
scale = self.scale.upper().encode('ascii')
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, 9,
self.jd1, self.jd2_filled)
out = np.empty(self.jd1.shape, dtype=[('year', 'i4'),
('month', 'i4'),
('day', 'i4'),
('hour', 'i4'),
('minute', 'i4'),
('second', 'f8')])
out['year'] = iys
out['month'] = ims
out['day'] = ids
out['hour'] = ihmsfs['h']
out['minute'] = ihmsfs['m']
out['second'] = ihmsfs['s'] + ihmsfs['f'] * 10**(-9)
out = out.view(np.recarray)
return self.mask_if_needed(out)
class TimezoneInfo(datetime.tzinfo):
"""
Subclass of the `~datetime.tzinfo` object, used in the
to_datetime method to specify timezones.
It may be safer in most cases to use a timezone database package like
pytz rather than defining your own timezones - this class is mainly
a workaround for users without pytz.
"""
@u.quantity_input(utc_offset=u.day, dst=u.day)
def __init__(self, utc_offset=0 * u.day, dst=0 * u.day, tzname=None):
"""
Parameters
----------
utc_offset : `~astropy.units.Quantity`, optional
Offset from UTC in days. Defaults to zero.
dst : `~astropy.units.Quantity`, optional
Daylight Savings Time offset in days. Defaults to zero
(no daylight savings).
tzname : str or `None`, optional
Name of timezone
Examples
--------
>>> from datetime import datetime
>>> from astropy.time import TimezoneInfo # Specifies a timezone
>>> import astropy.units as u
>>> utc = TimezoneInfo() # Defaults to UTC
>>> utc_plus_one_hour = TimezoneInfo(utc_offset=1*u.hour) # UTC+1
>>> dt_aware = datetime(2000, 1, 1, 0, 0, 0, tzinfo=utc_plus_one_hour)
>>> print(dt_aware)
2000-01-01 00:00:00+01:00
>>> print(dt_aware.astimezone(utc))
1999-12-31 23:00:00+00:00
"""
if utc_offset == 0 and dst == 0 and tzname is None:
tzname = 'UTC'
self._utcoffset = datetime.timedelta(utc_offset.to_value(u.day))
self._tzname = tzname
self._dst = datetime.timedelta(dst.to_value(u.day))
def utcoffset(self, dt):
return self._utcoffset
def tzname(self, dt):
return str(self._tzname)
def dst(self, dt):
return self._dst
class TimeString(TimeUnique):
"""
Base class for string-like time representations.
This class assumes that anything following the last decimal point to the
right is a fraction of a second.
    This is a reference implementation that can be made much faster with effort.
"""
def _check_val_type(self, val1, val2):
if val1.dtype.kind not in ('S', 'U') and val1.size:
raise TypeError('Input values for {} class must be strings'
.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def parse_string(self, timestr, subfmts):
"""Read time from a single string, using a set of possible formats."""
# Datetime components required for conversion to JD by ERFA, along
# with the default values.
components = ('year', 'mon', 'mday', 'hour', 'min', 'sec')
defaults = (None, 1, 1, 0, 0, 0)
# Assume that anything following "." on the right side is a
# floating fraction of a second.
try:
idot = timestr.rindex('.')
except Exception:
fracsec = 0.0
else:
timestr, fracsec = timestr[:idot], timestr[idot:]
fracsec = float(fracsec)
for _, strptime_fmt_or_regex, _ in subfmts:
if isinstance(strptime_fmt_or_regex, str):
try:
tm = time.strptime(timestr, strptime_fmt_or_regex)
except ValueError:
continue
else:
vals = [getattr(tm, 'tm_' + component)
for component in components]
else:
tm = re.match(strptime_fmt_or_regex, timestr)
if tm is None:
continue
tm = tm.groupdict()
vals = [int(tm.get(component, default)) for component, default
in zip(components, defaults)]
# Add fractional seconds
vals[-1] = vals[-1] + fracsec
return vals
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
def set_jds(self, val1, val2):
"""Parse the time strings contained in val1 and set jd1, jd2"""
# Select subformats based on current self.in_subfmt
subfmts = self._select_subfmts(self.in_subfmt)
# Be liberal in what we accept: convert bytes to ascii.
# Here .item() is needed for arrays with entries of unequal length,
# to strip trailing 0 bytes.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None, None, None, None, None, None],
flags=['zerosize_ok'],
op_dtypes=[None] + 5 * [np.intc] + [np.double])
for val, iy, im, id, ihr, imin, dsec in iterator:
val = to_string(val)
iy[...], im[...], id[...], ihr[...], imin[...], dsec[...] = (
self.parse_string(val, subfmts))
jd1, jd2 = erfa.dtf2d(self.scale.upper().encode('ascii'),
*iterator.operands[1:])
self.jd1, self.jd2 = day_frac(jd1, jd2)
def str_kwargs(self):
"""
Generator that yields a dict of values corresponding to the
calendar date and time for the internal JD values.
"""
scale = self.scale.upper().encode('ascii'),
iys, ims, ids, ihmsfs = erfa.d2dtf(scale, self.precision,
self.jd1, self.jd2_filled)
# Get the str_fmt element of the first allowed output subformat
_, _, str_fmt = self._select_subfmts(self.out_subfmt)[0]
if '{yday:' in str_fmt:
has_yday = True
else:
has_yday = False
yday = None
ihrs = ihmsfs['h']
imins = ihmsfs['m']
isecs = ihmsfs['s']
ifracs = ihmsfs['f']
for iy, im, id, ihr, imin, isec, ifracsec in np.nditer(
[iys, ims, ids, ihrs, imins, isecs, ifracs],
flags=['zerosize_ok']):
if has_yday:
yday = datetime.datetime(iy, im, id).timetuple().tm_yday
yield {'year': int(iy), 'mon': int(im), 'day': int(id),
'hour': int(ihr), 'min': int(imin), 'sec': int(isec),
'fracsec': int(ifracsec), 'yday': yday}
def format_string(self, str_fmt, **kwargs):
"""Write time to a string using a given format.
By default, just interprets str_fmt as a format string,
but subclasses can add to this.
"""
return str_fmt.format(**kwargs)
@property
def value(self):
# Select the first available subformat based on current
# self.out_subfmt
subfmts = self._select_subfmts(self.out_subfmt)
_, _, str_fmt = subfmts[0]
# TODO: fix this ugly hack
if self.precision > 0 and str_fmt.endswith('{sec:02d}'):
str_fmt += '.{fracsec:0' + str(self.precision) + 'd}'
# Try to optimize this later. Can't pre-allocate because length of
# output could change, e.g. year rolls from 999 to 1000.
outs = []
for kwargs in self.str_kwargs():
outs.append(str(self.format_string(str_fmt, **kwargs)))
return np.array(outs).reshape(self.jd1.shape)
class TimeISO(TimeString):
"""
ISO 8601 compliant date-time format "YYYY-MM-DD HH:MM:SS.sss...".
For example, 2000-01-01 00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'iso'
subfmts = (('date_hms',
'%Y-%m-%d %H:%M:%S',
# XXX To Do - use strftime for output ??
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%d %H:%M',
'{year:d}-{mon:02d}-{day:02d} {hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
def parse_string(self, timestr, subfmts):
# Handle trailing 'Z' for UTC time
if timestr.endswith('Z'):
if self.scale != 'utc':
raise ValueError("Time input terminating in 'Z' must have "
"scale='UTC'")
timestr = timestr[:-1]
return super().parse_string(timestr, subfmts)
class TimeISOT(TimeISO):
"""
ISO 8601 compliant date-time format "YYYY-MM-DDTHH:MM:SS.sss...".
This is the same as TimeISO except for a "T" instead of space between
the date and time.
For example, 2000-01-01T00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'isot'
subfmts = (('date_hms',
'%Y-%m-%dT%H:%M:%S',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y-%m-%dT%H:%M',
'{year:d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}'),
('date',
'%Y-%m-%d',
'{year:d}-{mon:02d}-{day:02d}'))
class TimeYearDayTime(TimeISO):
"""
Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".
The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date_hm': date + hours, mins
- 'date': date
"""
name = 'yday'
subfmts = (('date_hms',
'%Y:%j:%H:%M:%S',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}'),
('date_hm',
'%Y:%j:%H:%M',
'{year:d}:{yday:03d}:{hour:02d}:{min:02d}'),
('date',
'%Y:%j',
'{year:d}:{yday:03d}'))
class TimeDatetime64(TimeISOT):
name = 'datetime64'
def _check_val_type(self, val1, val2):
if not val1.dtype.kind == 'M':
if val1.size > 0:
raise TypeError('Input values for {} class must be '
'datetime64 objects'.format(self.name))
else:
val1 = np.array([], 'datetime64[D]')
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
# If there are any masked values in the ``val1`` datetime64 array
# ('NaT') then stub them with a valid date so downstream parse_string
# will work. The value under the mask is arbitrary but a "modern" date
# is good.
mask = np.isnat(val1)
masked = np.any(mask)
if masked:
val1 = val1.copy()
val1[mask] = '2000'
# Make sure M(onth) and Y(ear) dates will parse and convert to bytestring
if val1.dtype.name in ['datetime64[M]', 'datetime64[Y]']:
val1 = val1.astype('datetime64[D]')
val1 = val1.astype('S')
# Standard ISO string parsing now
super().set_jds(val1, val2)
# Finally apply mask if necessary
if masked:
self.jd2[mask] = np.nan
@property
def value(self):
precision = self.precision
self.precision = 9
ret = super().value
self.precision = precision
return ret.astype('datetime64')
class TimeFITS(TimeString):
"""
FITS format: "[±Y]YYYY-MM-DD[THH:MM:SS[.sss]]".
ISOT but can give signed five-digit year (mostly for negative years);
The allowed subformats are:
- 'date_hms': date + hours, mins, secs (and optional fractional secs)
- 'date': date
- 'longdate_hms': as 'date_hms', but with signed 5-digit year
- 'longdate': as 'date', but with signed 5-digit year
See Rots et al., 2015, A&A 574:A36 (arXiv:1409.7583).
"""
name = 'fits'
subfmts = (
('date_hms',
(r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:04d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('date',
r'(?P<year>\d{4})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:04d}-{mon:02d}-{day:02d}'),
('longdate_hms',
(r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)T'
r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d(\.\d*)?)'),
'{year:+06d}-{mon:02d}-{day:02d}T{hour:02d}:{min:02d}:{sec:02d}'),
('longdate',
r'(?P<year>[+-]\d{5})-(?P<mon>\d\d)-(?P<mday>\d\d)',
'{year:+06d}-{mon:02d}-{day:02d}'))
# Add the regex that parses the scale and possible realization.
# Support for this is deprecated. Read old style but no longer write
# in this style.
subfmts = tuple(
(subfmt[0],
subfmt[1] + r'(\((?P<scale>\w+)(\((?P<realization>\w+)\))?\))?',
subfmt[2]) for subfmt in subfmts)
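    # Illustrative example (added for clarity, not in the original source): with the
    # extension above, an old-style string such as '1999-01-01T00:00:00(TT)' still
    # matches, with 'TT' captured in the 'scale' group; parse_string below then issues
    # an AstropyDeprecationWarning and maps deprecated identifiers (e.g. 'ET', 'TDT')
    # through FITS_DEPRECATED_SCALES before validating against TIME_SCALES.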
def parse_string(self, timestr, subfmts):
"""Read time and deprecated scale if present"""
# Try parsing with any of the allowed sub-formats.
for _, regex, _ in subfmts:
tm = re.match(regex, timestr)
if tm:
break
else:
raise ValueError('Time {} does not match {} format'
.format(timestr, self.name))
tm = tm.groupdict()
# Scale and realization are deprecated and strings in this form
# are no longer created. We issue a warning but still use the value.
if tm['scale'] is not None:
warnings.warn("FITS time strings should no longer have embedded time scale.",
AstropyDeprecationWarning)
# If a scale was given, translate from a possible deprecated
# timescale identifier to the scale used by Time.
fits_scale = tm['scale'].upper()
scale = FITS_DEPRECATED_SCALES.get(fits_scale, fits_scale.lower())
if scale not in TIME_SCALES:
raise ValueError("Scale {!r} is not in the allowed scales {}"
.format(scale, sorted(TIME_SCALES)))
# If no scale was given in the initialiser, set the scale to
# that given in the string. Realization is ignored
# and is only supported to allow old-style strings to be
# parsed.
if self._scale is None:
self._scale = scale
if scale != self.scale:
raise ValueError("Input strings for {} class must all "
"have consistent time scales."
.format(self.name))
return [int(tm['year']), int(tm['mon']), int(tm['mday']),
int(tm.get('hour', 0)), int(tm.get('min', 0)),
float(tm.get('sec', 0.))]
@property
def value(self):
"""Convert times to strings, using signed 5 digit if necessary."""
if 'long' not in self.out_subfmt:
# If we have times before year 0 or after year 9999, we can
# output only in a "long" format, using signed 5-digit years.
jd = self.jd1 + self.jd2
if jd.size and (jd.min() < 1721425.5 or jd.max() >= 5373484.5):
self.out_subfmt = 'long' + self.out_subfmt
return super().value
class TimeEpochDate(TimeNumeric):
"""
    Base class to support floating point Besselian and Julian epoch dates.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(val1 + val2)
self.jd1, self.jd2 = day_frac(jd1, jd2)
def to_value(self, **kwargs):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
value = jd_to_epoch(self.jd1, self.jd2)
return super().to_value(jd1=value, jd2=0., **kwargs)
value = property(to_value)
class TimeBesselianEpoch(TimeEpochDate):
"""Besselian Epoch year as floating point value(s) like 1950.0"""
name = 'byear'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
def _check_val_type(self, val1, val2):
"""Input value validation, typically overridden by derived classes"""
if hasattr(val1, 'to') and hasattr(val1, 'unit'):
raise ValueError("Cannot use Quantities for 'byear' format, "
"as the interpretation would be ambiguous. "
"Use float with Besselian year instead. ")
# FIXME: is val2 really okay here?
return super()._check_val_type(val1, val2)
class TimeJulianEpoch(TimeEpochDate):
"""Julian Epoch year as floating point value(s) like 2000.0"""
name = 'jyear'
unit = erfa.DJY # 365.25, the Julian year, for conversion to quantities
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
class TimeEpochDateString(TimeString):
"""
Base class to support string Besselian and Julian epoch dates
such as 'B1950.0' or 'J2000.0' respectively.
"""
_default_scale = 'tt' # As of astropy 3.2, this is no longer 'utc'.
def set_jds(self, val1, val2):
epoch_prefix = self.epoch_prefix
# Be liberal in what we accept: convert bytes to ascii.
to_string = (str if val1.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([val1, None], op_dtypes=[val1.dtype, np.double],
flags=['zerosize_ok'])
for val, years in iterator:
try:
time_str = to_string(val)
epoch_type, year_str = time_str[0], time_str[1:]
year = float(year_str)
if epoch_type.upper() != epoch_prefix:
raise ValueError
except (IndexError, ValueError, UnicodeEncodeError):
raise ValueError('Time {} does not match {} format'
.format(time_str, self.name))
else:
years[...] = year
self._check_scale(self._scale) # validate scale.
epoch_to_jd = getattr(erfa, self.epoch_to_jd)
jd1, jd2 = epoch_to_jd(iterator.operands[-1])
self.jd1, self.jd2 = day_frac(jd1, jd2)
@property
def value(self):
jd_to_epoch = getattr(erfa, self.jd_to_epoch)
years = jd_to_epoch(self.jd1, self.jd2)
# Use old-style format since it is a factor of 2 faster
str_fmt = self.epoch_prefix + '%.' + str(self.precision) + 'f'
outs = [str_fmt % year for year in years.flat]
return np.array(outs).reshape(self.jd1.shape)
class TimeBesselianEpochString(TimeEpochDateString):
"""Besselian Epoch year as string value(s) like 'B1950.0'"""
name = 'byear_str'
epoch_to_jd = 'epb2jd'
jd_to_epoch = 'epb'
epoch_prefix = 'B'
class TimeJulianEpochString(TimeEpochDateString):
"""Julian Epoch year as string value(s) like 'J2000.0'"""
name = 'jyear_str'
epoch_to_jd = 'epj2jd'
jd_to_epoch = 'epj'
epoch_prefix = 'J'
class TimeDeltaFormatMeta(TimeFormatMeta):
_registry = TIME_DELTA_FORMATS
class TimeDeltaFormat(TimeFormat, metaclass=TimeDeltaFormatMeta):
"""Base class for time delta representations"""
def _check_scale(self, scale):
"""
Check that the scale is in the allowed list of scales, or is `None`
"""
if scale is not None and scale not in TIME_DELTA_SCALES:
raise ScaleValueError("Scale value '{}' not in "
"allowed values {}"
.format(scale, TIME_DELTA_SCALES))
return scale
class TimeDeltaNumeric(TimeDeltaFormat, TimeNumeric):
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
self.jd1, self.jd2 = day_frac(val1, val2, divisor=1. / self.unit)
def to_value(self, **kwargs):
# Note that 1/unit is always exactly representable, so the
# following multiplications are exact.
factor = 1. / self.unit
jd1 = self.jd1 * factor
jd2 = self.jd2 * factor
return super().to_value(jd1=jd1, jd2=jd2, **kwargs)
value = property(to_value)
class TimeDeltaSec(TimeDeltaNumeric):
"""Time delta in SI seconds"""
name = 'sec'
unit = 1. / erfa.DAYSEC # for quantity input
class TimeDeltaJD(TimeDeltaNumeric):
"""Time delta in Julian days (86400 SI seconds)"""
name = 'jd'
unit = 1.
class TimeDeltaDatetime(TimeDeltaFormat, TimeUnique):
"""Time delta in datetime.timedelta"""
name = 'datetime'
def _check_val_type(self, val1, val2):
if not all(isinstance(val, datetime.timedelta) for val in val1.flat):
raise TypeError('Input values for {} class must be '
'datetime.timedelta objects'.format(self.name))
if val2 is not None:
raise ValueError(
f'{self.name} objects do not accept a val2 but you provided {val2}')
return val1, None
def set_jds(self, val1, val2):
self._check_scale(self._scale) # Validate scale.
iterator = np.nditer([val1, None, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None, np.double, np.double])
day = datetime.timedelta(days=1)
for val, jd1, jd2 in iterator:
jd1[...], other = divmod(val.item(), day)
jd2[...] = other / day
self.jd1, self.jd2 = day_frac(iterator.operands[-2],
iterator.operands[-1])
@property
def value(self):
iterator = np.nditer([self.jd1, self.jd2, None],
flags=['refs_ok', 'zerosize_ok'],
op_dtypes=[None, None, object])
for jd1, jd2, out in iterator:
jd1_, jd2_ = day_frac(jd1, jd2)
out[...] = datetime.timedelta(days=jd1_,
microseconds=jd2_ * 86400 * 1e6)
return self.mask_if_needed(iterator.operands[-1])
def _validate_jd_for_storage(jd):
if isinstance(jd, (float, int)):
return np.array(jd, dtype=np.float_)
if (isinstance(jd, np.generic)
and (jd.dtype.kind == 'f' and jd.dtype.itemsize <= 8
or jd.dtype.kind in 'iu')):
return np.array(jd, dtype=np.float_)
elif (isinstance(jd, np.ndarray)
and jd.dtype.kind == 'f'
and jd.dtype.itemsize == 8):
return jd
else:
raise TypeError(
f"JD values must be arrays (possibly zero-dimensional) "
f"of floats but we got {jd!r} of type {type(jd)}")
def _broadcast_writeable(jd1, jd2):
if jd1.shape == jd2.shape:
return jd1, jd2
# When using broadcast_arrays, *both* are flagged with
# warn-on-write, even the one that wasn't modified, and
# require "C" only clears the flag if it actually copied
# anything.
shape = np.broadcast(jd1, jd2).shape
if jd1.shape == shape:
s_jd1 = jd1
else:
s_jd1 = np.require(np.broadcast_to(jd1, shape),
requirements=["C", "W"])
if jd2.shape == shape:
s_jd2 = jd2
else:
s_jd2 = np.require(np.broadcast_to(jd2, shape),
requirements=["C", "W"])
return s_jd1, s_jd2
from .core import Time, TIME_SCALES, TIME_DELTA_SCALES, ScaleValueError # noqa
|
"""
Views to support bulk email functionalities like opt-out.
"""
import logging
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.http import Http404
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.edxmako.shortcuts import render_to_response
from lms.djangoapps.bulk_email.models import Optout
from lms.djangoapps.courseware.courses import get_course_by_id
from lms.djangoapps.discussion.notification_prefs.views import UsernameCipher, UsernameDecryptionException
log = logging.getLogger(__name__)
def opt_out_email_updates(request, token, course_id):
"""
A view that let users opt out of any email updates.
    This is meant to be the target of an opt-out link or button.
The `token` parameter must decrypt to a valid username.
The `course_id` is the string course key of any course.
Raises a 404 if there are any errors parsing the input.
"""
try:
username = UsernameCipher().decrypt(token).decode("utf-8")
user = User.objects.get(username=username)
course_key = CourseKey.from_string(course_id)
course = get_course_by_id(course_key, depth=0)
except UnicodeDecodeError:
raise Http404("base64url") # lint-amnesty, pylint: disable=raise-missing-from
except UsernameDecryptionException as exn:
raise Http404(str(exn)) # lint-amnesty, pylint: disable=raise-missing-from
except User.DoesNotExist:
raise Http404("username") # lint-amnesty, pylint: disable=raise-missing-from
except InvalidKeyError:
raise Http404("course") # lint-amnesty, pylint: disable=raise-missing-from
unsub_check = request.POST.get('unsubscribe', False)
context = {
'course': course,
'unsubscribe': unsub_check
}
if request.method == 'GET':
return render_to_response('bulk_email/confirm_unsubscribe.html', context)
if request.method == 'POST' and unsub_check:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
return render_to_response('bulk_email/unsubscribe_success.html', context)
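# Hedged wiring sketch (the real edx-platform URLconf is not part of this file; the
# route below is an assumption for illustration only): the view needs both the
# encrypted token and the course id from the URL, e.g.
#
#   from django.urls import re_path
#   urlpatterns = [
#       re_path(r'^email/optout/(?P<token>[^/]+)/(?P<course_id>.+)$',
#               opt_out_email_updates),
#   ]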
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wjy#uh74#jux98w2v$nwgmx1+!=o8wb+)zvkoz35p%(ykd6xyi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'project.blog',
'floppyforms',
'django_backend',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
# Implement a cost-proportional method of setting
# variable-specific rho values for the progressive hedging
# algorithm. Automatically retrieve cost parameters from the
# active objective function.
# See CP(*) strategy described in Watson, J. P., & Woodruff, D. L. (2011). Progressive hedging innovations for a class of stochastic mixed-integer resource allocation problems. Computational Management Science.
# Note: sympy is a prerequisite.
# Install via `sudo pip install sympy`
# Implementation notes:
# I couldn't find hooks in Pyomo to extract the cost
# coefficient for each decision variable. The best I
# could get was a text representation of the objective
# expression, a formula which is not simplified into
# an ultimate cost vector.
# I use sympy to parse the text of the equation, simplify
# it, and extract coefficients for each variable.
# The only problem is that the variables are formatted
# with indexes (ex. foo[2020,bar]) and sympy can't parse
# those. So, I replace them with unique ids (ex. x123456)
# before giving the formula to sympy, and reverse the process
# after sympy has finished parsing.
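# Illustrative sketch of the substitution described above (names and numbers are
# hypothetical): if the objective prints as
#     "2.5*BuildCap[2030,CA_LADWP] + 4.0*BuildCap[2030,NV]"
# each indexed variable is replaced by an alias such as "x140230511" before sympify()
# is called; obj_expr.coeff("x140230511") then returns 2.5, which (scaled by
# rho_coefficient) becomes that variable's rho value.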
import StringIO
from re import findall
from sympy import sympify
from pyomo.environ import Objective
import sys, time
def ph_rhosetter_callback(ph, scenario_tree, scenario):
rho_coefficient = 1.0
scenario_instance = scenario._instance
symbol_map = scenario_instance._ScenarioTreeSymbolMap
objective = scenario_instance.component_data_objects(
Objective, active=True, descend_into=True )
objective = objective.next()
string_out = StringIO.StringIO()
objective.expr.to_string(ostream=string_out)
objective_as_str = string_out.getvalue()
string_out.close()
# Find indexed variables like BuildCap[2030, CA_LADWP]
# using a regular expression. See python documentation.
    # The first part (?<=[^a-zA-Z]) ensures the search pattern is
    # not preceded by a letter.
    # The regex returns two groups per match because it uses
    # two sets of parentheses. Only the first (outer) group, which
    # captures the full indexed name, is of interest; the second
    # group that captures just the index is ignored.
tiempo0 = time.time()
#print "Scenario X -------------------------------------"+str(tiempo0)
#sys.stdout.flush()
pattern = "(?<=[^a-zA-Z])([a-zA-Z][a-zA-Z_0-9]*(\[[^]]*\])?)"
component_by_alias = {}
variable_list = findall(pattern, objective_as_str)
for (cname, index_as_str) in variable_list:
component = scenario_instance.find_component(cname)
alias = "x" + str(id(component))
component_by_alias[alias] = component
objective_as_str = objective_as_str.replace(cname, alias)
tiempo1=time.time()-tiempo0
#print "Replaced Objective Function variables with ID's in: "+str(tiempo1)+"-------"
#sys.stdout.flush()
# After the variables+indexes have clean names,
# parse the equation with sympify
obj_expr = sympify(objective_as_str)
tiempo2=time.time()-tiempo1
#print "Sympyfied OF string in: "+str(tiempo2)+"-------"
#sys.stdout.flush()
for (alias, component) in component_by_alias.iteritems():
tiempo3=time.time()
variable_id = symbol_map.getSymbol(component)
coefficient = obj_expr.coeff(alias)
set_rho = False
for tree_node in scenario._node_list:
if variable_id in tree_node._standard_variable_ids:
ph.setRhoOneScenario(
tree_node,
scenario,
variable_id,
coefficient * rho_coefficient)
set_rho = True
break
if set_rho == False:
print("Warning! Could not find tree node for variable {}; rho not set.".format(component.cname()))
#else:
# tiempo4=time.time()-tiempo3
# print "Rho for "+str(component)+" has been set in: "+str(tiempo4)+"s, with a value of: "+str(coefficient)
# sys.stdout.flush()
|
#!/usr/bin/python
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import urllib
from bonkbot.bot.decorators import command
def init_plugin(config, irc_client):
return [stock]
@command('stock')
def stock(message):
"""quote [symbol] - Display information for the stock [symbol]."""
args = message.data.split()
if len(args) > args.index('stock') + 1:
symbol = args[args.index('stock') + 1]
price = _lookup(symbol, 'l1')
change = _lookup(symbol, 'c1')
message.reply(symbol + ' - Price: $' + price + ', Change: $' + change)
def _lookup(symbol, f):
url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (symbol, f)
value = urllib.urlopen(url).read().strip().strip('"')
return value
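# Illustrative request (the Yahoo CSV endpoint above has since been retired; the
# numbers are hypothetical): for a message containing "stock AAPL" the plugin fetches
#   http://finance.yahoo.com/d/quotes.csv?s=AAPL&f=l1   (last price)
#   http://finance.yahoo.com/d/quotes.csv?s=AAPL&f=c1   (change)
# and replies with something like "AAPL - Price: $105.35, Change: $-0.42".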
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import String, Column, ForeignKey
from sqlalchemy.orm import validates
from aquilon.aqdb.model import Resource
_TN = 'hostlink'
class Hostlink(Resource):
""" Hostlink resources """
__tablename__ = _TN
__mapper_args__ = {'polymorphic_identity': 'hostlink'}
resource_id = Column(ForeignKey(Resource.id, ondelete='CASCADE'),
primary_key=True)
target = Column(String(255), nullable=False)
owner_user = Column(String(32), default='root', nullable=False)
owner_group = Column(String(32), nullable=True)
__table_args__ = ({'info': {'unique_fields': ['name', 'holder']}},)
    @validates('owner_user', 'owner_group')
def validate_owner(self, key, value):
if ':' in value:
raise ValueError("%s cannot contain the ':' character" % key)
return value
|
#!/usr/bin/env python
import optparse
from sys import *
import os,sys,re
from optparse import OptionParser
import glob
import subprocess
from os import system
import linecache
import time
#=========================
def setupParserOptions():
parser = optparse.OptionParser()
parser.set_usage("%prog -i <ctf from appion> --path=<path to micros> --appion=<appion base name to remove> --cs=<cs> --kev=<kev> ")
parser.add_option("-i",dest="ctf",type="string",metavar="FILE",
help="CTF parameter file from estimateCTF_CTFFIND3.py")
parser.add_option("-o",dest="microstar",type="string",metavar="FILE",
help="Output name for relion .star file with micrograph information")
parser.add_option("--path",dest="folder",type="string",metavar="STRING",
help="Relative path to micrographs that Relion will use (e.g. 'Micrographs')")
parser.add_option("--cs",dest="cs",type="float",metavar="FLOAT",
help="Spherical aberration (Cs) of microscope (mm)")
parser.add_option("--kev",dest="kev",type="int",metavar="INT",
help="Accelerating voltage of microscope (keV)")
parser.add_option("--pixel",dest="detector",type="float",metavar="float",
help="Pixel size of detector (um) (K2 = 14 um)")
parser.add_option("--mag",dest="mag",type="int",metavar="INT",
help="Nominal magnification of microscope")
parser.add_option("--ampcontrast",dest="ampcontrast",type="float",metavar="float",
help="Amplitude contrast of images (cryo: 0.07)")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 0:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 2:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
#=============================
def checkConflicts(params):
if not os.path.exists(params['ctf']):
print "\nError: CTF file '%s' does not exist\n" % params['CTF']
sys.exit()
if os.path.exists(params['microstar']):
print '\nError: output file %s already exists. Exiting.' %(params['microstar'])
sys.exit()
#===============================
def convertToRelionCTF(params):
ctf = open(params['ctf'],'r')
for line in ctf:
l = line.split()
if l[-1] == 'Astig':
continue
#Prepare micrograph name
if params['debug'] is True:
print line
micro = l[0].split('/')[-1]
microname = '%s/%s' %(params['folder'],micro)
if params['debug'] is True:
print 'Microname=%s' %( microname)
ctflog = micro[:-4]+'_ctffind3.log'
#Get defocus information
df1 = float(l[1])
df2 = float(l[2])
astig = float(l[3])
ampcontrast = params['ampcontrast']
crosscorr = 0.5
#Check if new ctf log file exists
if os.path.exists(ctflog):
print '%s already exists. Exiting.' %(ctflog)
sys.exit()
#Open new ctf log file
ctf='\n'
ctf+=' CTF DETERMINATION, V3.5 (9-Mar-2013)\n'
ctf+=' Distributed under the GNU General Public License (GPL)\n'
ctf+='\n'
ctf+=' Parallel processing: NCPUS = 4\n'
ctf+='\n'
ctf+=' Input image file name\n'
ctf+='%s\n' %(microname)
ctf+='\n'
ctf+='\n'
ctf+=' Output diagnostic file name\n'
ctf+='%s.ctf\n'%(microname[:-4])
ctf+='\n'
ctf+='\n'
ctf+=' CS[mm], HT[kV], AmpCnst, XMAG, DStep[um]\n'
ctf+=' %.1f %.1f %.2f %.1f %.3f\n' %(params['cs'],params['kev'],ampcontrast,params['mag'],params['detector'])
ctf+='\n'
ctf+='\n'
ctf+=' DFMID1 DFMID2 ANGAST CC\n'
ctf+='\n'
ctf+=' %.2f\t%.2f\t%.2f\t%.5f\tFinal Values\n' %(df1,df2,astig,crosscorr)
outctf = open(ctflog,'w')
outctf.write(ctf)
outctf.close()
#================================
def convertToRelionSTAR(params):
relionOut = writeRelionHeader()
out = open(params['microstar'],'w')
ctf = open(params['ctf'],'r')
for line in ctf:
l = line.split()
if l[-1] == 'Astig':
continue
#Prepare micrograph name
if params['debug'] is True:
print line
micro = l[0].split('/')[-1]
microname = '%s/%s' %(params['folder'],micro)
if params['debug'] is True:
print 'Microname=%s' %( microname)
#Get defocus information
df1 = float(l[1])
df2 = float(l[2])
astig = float(l[3])
ampcontrast = params['ampcontrast']
crosscorr = 0.5
relionOut+='%s %.6f %.6f %.6f %.6f %.6f %.6f %.6g %.6f %.6f\n' %(microname,df1,df2,astig,params['kev'],params['cs'],ampcontrast,params['mag'],params['detector'],crosscorr)
out.write(relionOut)
#================================
def writeRelionHeader():
relion='\n'
relion+='data_\n'
relion+='\n'
relion+='loop_\n'
relion+='_rlnMicrographName #1\n'
relion+='_rlnDefocusU #2\n'
relion+='_rlnDefocusV #3\n'
relion+='_rlnDefocusAngle #4\n'
relion+='_rlnVoltage #5\n'
relion+='_rlnSphericalAberration #6\n'
relion+='_rlnAmplitudeContrast #7\n'
relion+='_rlnMagnification #8\n'
relion+='_rlnDetectorPixelSize #9\n'
relion+='_rlnCtfFigureOfMerit #10\n'
return relion
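# Example of a single data row appended after this header (values are illustrative
# only; the column order matches the loop_ labels above):
#   Micrographs/image_0001.mrc 21540.2 20980.7 34.5 300.0 2.7 0.07 50000 14.0 0.5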
#==============================
if __name__ == "__main__":
params=setupParserOptions()
checkConflicts(params)
convertToRelionCTF(params)
convertToRelionSTAR(params)
|
#!/usr/bin/python3
import time, os, sys, socket, math, atexit
import RPi.GPIO as GPIO
try:
import thread
except ImportError:
import _thread as thread
class interrupt_watcher(object):
def __init__(self, sensorPin, bounceTime, peak_sample = 5, peak_monitor = False):
self.interrupt_count = 0
self.running = True
self.interrupt_peak_count = 0
self.interrupt_peak_max = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(sensorPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(sensorPin, GPIO.FALLING, callback=self.interrupt_call_back, bouncetime=bounceTime)
if peak_monitor:
thread.start_new_thread(self.peak_monitor, (peak_sample,))
def interrupt_call_back(self, channel):
self.interrupt_count += 1
self.interrupt_peak_count += 1
def get_value(self):
return self.interrupt_count
def get_peak(self):
return self.interrupt_peak_max
def reset_count(self):
self.interrupt_count = 0
self.interrupt_peak_count = 0
self.interrupt_peak_max = 0
def peak_monitor(self, sample_period):
while self.running:
time.sleep(sample_period)
if self.interrupt_peak_count > self.interrupt_peak_max:
self.interrupt_peak_max = self.interrupt_peak_count
self.interrupt_peak_count = 0
def __del__(self):
self.running = False
class wind_speed_interrupt_watcher(interrupt_watcher):
def __init__(self, radius_cm, sensorPin, bounceTime, calibration = 2.36):
super(wind_speed_interrupt_watcher, self).__init__(sensorPin, bounceTime, peak_sample = 5, peak_monitor = True)
circumference_cm = (2 * math.pi) * radius_cm
self.circumference = circumference_cm / 100000.0 #circumference in km
self.calibration = calibration
self.last_time = time.time()
def calculate_speed(self, interrupt_count, interval_seconds):
rotations = interrupt_count / 2.0
distance_per_second = (self.circumference * rotations) / interval_seconds
speed_per_hour = distance_per_second * 3600
return speed_per_hour * self.calibration
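    # Worked example (a sketch): radius_cm=9 gives a circumference of ~56.5 cm,
    # i.e. ~5.655e-4 km. 10 interrupts in 5 s are 5 rotations, so the speed is
    # 5.655e-4 * 5 / 5 * 3600 ~= 2.04 km/h, times the 2.36 calibration ~= 4.8 km/h.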
def get_wind_speed(self):
return self.calculate_speed(self.get_value(), time.time() - self.last_time)
def get_wind_gust_speed(self):
return self.calculate_speed(self.get_peak(), 5) #5 seconds
def reset_timer(self):
self.last_time = time.time()
class rainfall_interrupt_watcher(interrupt_watcher):
def __init__(self, tip_volume, sensorPin, bounceTime):
super(rainfall_interrupt_watcher, self).__init__(sensorPin, bounceTime)
self.tip_volume = tip_volume
def get_rainfall(self):
return self.tip_volume * self.get_value()
class interrupt_daemon(object):
def __init__(self, port):
self.running = False
self.port = port
self.socket_data = "{0}\n"
def setup(self):
self.rain = rainfall_interrupt_watcher(0.2794, 6, 300) #Maplin rain gauge = 0.2794 ml per bucket tip, was 27 on prototype
self.wind = wind_speed_interrupt_watcher(9.0, 5, 1) #Maplin anemometer = radius of 9 cm, was 17 on prototype
try:
self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.skt.bind(("127.0.0.1", self.port))
self.running = True
except socket.error as e:
print(e)
raise
self.skt.listen(10)
def send(self, conn, s):
conn.sendall(self.socket_data.format(s).encode('utf-8'))
def receive(self, conn, length):
data = conn.recv(length)
return data.decode('utf-8')
def handle_connection(self, conn):
connected = True
self.send(conn, "OK")
while connected and self.running:
data = self.receive(conn, 128)
if len(data) > 0:
data = data.strip()
if data == "RAIN":
self.send(conn, self.rain.get_rainfall())
elif data == "WIND":
self.send(conn, self.wind.get_wind_speed())
elif data == "GUST":
self.send(conn, self.wind.get_wind_gust_speed())
elif data == "RESET":
self.reset_counts()
self.send(conn, "OK")
elif data == "BYE":
connected = False
elif data == "STOP":
connected = False
self.stop()
conn.close()
def reset_counts(self):
self.rain.reset_count()
self.wind.reset_count()
self.wind.reset_timer()
def daemonize(self):
# do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177)
# first fork
try:
self.pid = os.fork()
if self.pid > 0:
sys.exit(0)
except OSError as e:
print(e)
raise
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# second fork
try:
self.pid = os.fork()
if self.pid > 0:
sys.exit(0)
except OSError as e:
print(e)
raise
# close file descriptors
sys.stdout.flush()
sys.stderr.flush()
def start(self):
try:
self.daemon_pid = None
self.daemonize()
self.daemon_pid = os.getpid()
print("PID: %d" % self.daemon_pid)
self.setup()
while self.running:
conn, addr = self.skt.accept() #blocking call
if self.running:
thread.start_new_thread(self.handle_connection, (conn,))
except Exception:
if self.running:
self.stop()
finally:
if self.daemon_pid == os.getpid():
self.skt.shutdown(socket.SHUT_RDWR)
self.skt.close()
GPIO.cleanup()
print("Stopped")
def stop(self):
self.running = False
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(("localhost", self.port)) #release blocking call
def send_stop_signal(port):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("localhost", port))
client.sendall("STOP".encode('utf-8'))
client.close()
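# Example client session (a sketch; assumes the daemon is already running and
# listening on port 49501):
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("localhost", 49501))
#   s.recv(128)                         # initial "OK\n" greeting
#   s.sendall("WIND".encode('utf-8'))   # ask for the current wind speed in km/h
#   print(s.recv(128))                  # e.g. "4.8\n"
#   s.sendall("BYE".encode('utf-8'))    # end the session cleanly
#   s.close()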
if __name__ == "__main__":
server_port = 49501
if len(sys.argv) >= 2:
arg = sys.argv[1].upper()
if arg == "START":
interrupt_daemon(server_port).start()
elif arg == "STOP":
send_stop_signal(server_port)
elif arg == "RESTART":
send_stop_signal(server_port)
time.sleep(1)
interrupt_daemon(server_port).start()
else:
print("usage: sudo {0} start|stop|restart".format(sys.argv[0]))
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
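# e.g. _parse_version('1.11.0') -> (1, 11, 0); a dev suffix such as '0.18.dev0'
# yields (0, 18, 'dev0') because the non-numeric component is kept as a string.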
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from numpy import partition
except ImportError:
    warnings.warn('Using `sort` instead of partition. '
                  'Upgrade numpy to 1.8 for better performance on large '
                  'numbers of clusters')
def partition(a, kth, axis=-1, kind='introselect', order=None):
return np.sort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
if sp_version < (0, 13, 0):
def rankdata(a, method='average'):
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
else:
from scipy.stats import rankdata
|
"""
Scraper for Indiana Supreme Court
CourtID: ind
Court Short Name: Ind.
Auth: Jon Andersen <[email protected]>
Reviewer: mlr
History:
2014-09-03: Created by Jon Andersen
"""
from juriscraper.OpinionSite import OpinionSite
import time
from datetime import date
class Site(OpinionSite):
def __init__(self):
super(Site, self).__init__()
self.url = 'http://www.in.gov/judiciary/opinions/supreme.html'
self.court_id = self.__module__
self.my_precedential_statuses = []
def _get_case_names(self):
raw_case_names = [s for s in self.html.xpath('//dl/dt/a/text()')]
case_names = []
self.my_precedential_statuses = []
for case_name in raw_case_names:
if case_name.find("(NFP)") >= 0:
case_names.append(case_name.replace("(NFP)", "").strip())
self.my_precedential_statuses.append("Unpublished")
else:
case_names.append(case_name)
self.my_precedential_statuses.append("Published")
return case_names
def _get_download_urls(self):
return [s for s in self.html.xpath('//dl/dt/a/@href')]
def _get_case_dates(self):
dates = []
for date_string in self.html.xpath('//dl/dd/dd/dd/text()'):
date_string = date_string.strip()
if date_string == '':
dates.append('')
else:
dates.append(date.fromtimestamp(
time.mktime(time.strptime(date_string, '%m/%d/%y'))))
return dates
def _get_docket_numbers(self):
return [s for s in self.html.xpath('//dl/dd/text()')]
def _get_lower_court_numbers(self):
return [e if e.strip() != "N/A" else "" for e in self.html.xpath('//dl/dd/dd/text()')]
def _get_precedential_statuses(self):
return self.my_precedential_statuses
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""set_server_default
Revision ID: 5446f2a45467
Revises: 2db5203cb7a9
Create Date: 2014-07-07 18:31:30.384522
"""
# revision identifiers, used by Alembic.
revision = '5446f2a45467'
down_revision = '2db5203cb7a9'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql
from neutron.plugins.cisco.common import cisco_constants
PLUGINS = {
'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2',
'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'mlnx': 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin',
'vmware': [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
'neutron.plugins.vmware.plugin.NsxPlugin',
'neutron.plugins.vmware.plugin.NsxServicePlugin',
],
'agents': [
'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
'neutron.plugins.nec.nec_plugin.NECPluginV2',
'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
],
}
def upgrade(active_plugins=None, options=None):
run(active_plugins, True)
def downgrade(active_plugins=None, options=None):
run(active_plugins, None)
def run(active_plugins, default):
if PLUGINS['ml2'] in active_plugins:
set_default_ml2(default)
if PLUGINS['mlnx'] in active_plugins:
set_default_agents(default)
set_default_mlnx(default)
if PLUGINS['brocade'] in active_plugins:
set_default_agents(default)
set_default_brocade(default)
if PLUGINS['cisco'] in active_plugins:
set_default_cisco(default)
if set(PLUGINS['vmware']) & set(active_plugins):
set_default_vmware(default)
set_default_agents(default)
if set(PLUGINS['agents']) & set(active_plugins):
set_default_agents(default)
def set_default_brocade(default):
if default:
default = ''
op.alter_column('brocadeports', 'port_id',
server_default=default, existing_type=sa.String(36))
def set_default_mlnx(default):
if default:
default = sqlalchemy.sql.false()
op.alter_column('segmentation_id_allocation', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
def set_default_cisco(default):
profile_binding_default = (cisco_constants.TENANT_ID_NOT_SET
if default else None)
profile_default = '0' if default else None
if default:
default = sqlalchemy.sql.false()
op.alter_column('cisco_n1kv_profile_bindings', 'tenant_id',
existing_type=sa.String(length=36),
server_default=profile_binding_default,
existing_nullable=False)
op.alter_column('cisco_network_profiles', 'multicast_ip_index',
server_default=profile_default, existing_type=sa.Integer)
op.alter_column('cisco_n1kv_vlan_allocations', 'allocated',
existing_type=sa.Boolean,
server_default=default, existing_nullable=False)
op.alter_column('cisco_n1kv_vxlan_allocations', 'allocated',
existing_type=sa.Boolean,
server_default=default, existing_nullable=False)
def set_default_vmware(default=None):
if default:
default = sqlalchemy.sql.false()
op.alter_column('nsxrouterextattributess', 'service_router',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('nsxrouterextattributess', 'distributed',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('qosqueues', 'default',
server_default=default, existing_type=sa.Boolean)
def set_default_agents(default=None):
if default:
default = sqlalchemy.sql.true()
op.alter_column('agents', 'admin_state_up',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
def set_default_ml2(default=None):
if default:
default = sqlalchemy.sql.false()
op.alter_column('ml2_gre_allocations', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
op.alter_column('ml2_vxlan_allocations', 'allocated',
server_default=default, existing_nullable=False,
existing_type=sa.Boolean)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: leepstools.file.angle
.. moduleauthor:: Hendrix Demers <[email protected]>
Read angle distribution result from LEEPS simulation.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules.
# Globals and constants variables.
class Angle():
def __init__(self):
self.theta_deg = []
self.probability_1_sr = []
self.stu_1_sr = []
def read(self, file_path):
lines = []
with open(file_path) as input_file:
lines = input_file.readlines()
for line in lines:
            line = line.strip()
if not line.startswith('#'):
items = line.split()
try:
theta_deg = float(items[0])
probability_1_sr = float(items[1])
stu_1_sr = float(items[2])
self.theta_deg.append(theta_deg)
self.probability_1_sr.append(probability_1_sr)
self.stu_1_sr.append(stu_1_sr)
except IndexError:
pass
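# Example usage (a sketch; 'angles.dat' is a hypothetical LEEPS output file with
# three whitespace-separated columns per line: theta, probability, uncertainty):
#   angle = Angle()
#   angle.read('angles.dat')
#   print(angle.theta_deg[:5], angle.probability_1_sr[:5])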
|
import tensorflow as tf
from tensorflow.contrib import rnn as rnn_cell
import numpy as np
import io
from util.tf_utils import tf_confusion_metrics
import inspect
import util.eval as eval
class Model():
"""
Tensorflow Graph using Recurrent LSTM layers and fully connected softmax layer for field identification
with multispectral/temporal data acquired from satellite imagery
Params
tf placeholders:
X Input data cube of dimensions [batch_size x max_observations x n_input]
y Target data Tensor of dimensions [batch_size x max_observations]
        seq_lengths Number of observations for each batch; if observations < max_obs the data is
padded with zeros [batch_size]
input parameters:
n_input length of observed pixel values. [n_pixels * n_bands + n_time]
n_pixels number of observed pixels (default 3*3)
n_bands number of observed bands (default 6)
n_time number of time parameters (default 1 e.g. day of year)
n_classes number of target classes
batch_size number of batches
        max_obs maximum number of observations; if seq_lengths < max_obs matrices will be padded
controls number of iterations in rnn layers (aka sequence length)
network specific parameters
n_layers number of rnn layers (aka depth)
learning_rate
dropout_keep_prob
logdir
[email protected]
"""
def __init__(self, n_input=9 * 6 + 1, n_classes=20, batch_size=50, max_obs=26,
n_layers=2, dropout_keep_prob=.5, adam_lr=1e-3, adam_b1=0.9, adam_b2=0.999, adam_eps=1e-8,
fc_w_stddev=0.1, fc_b_offset=0.1, n_cell_per_input=1,rnn_cell_type="basiclstm", gpu=None):
# save input arguments
self.args = inspect.getargvalues(inspect.currentframe()).locals
del self.args["self"] # delete self argument
self.n_classes = n_classes
with tf.device(None):
with tf.variable_scope('input'):
# block of [batch_size x max_obs x n_input]
self.X = tf.placeholder(tf.float32, [batch_size, max_obs, n_input], name="X")
self.y_ = self.y = y_ = tf.placeholder(tf.float32, [batch_size, max_obs, n_classes], name="y")
self.seq_lengths = seq_lengths = tf.placeholder(tf.int32, [batch_size], name="seq_lengths")
#self.y = y = tf.reshape(self.y_, [-1, n_classes], name="y")
with tf.name_scope('RNN'):
self.n_rnn_cells = n_rnn_cells = n_cell_per_input * n_input
if rnn_cell_type == "basiclstm":
cell = rnn_cell.BasicLSTMCell(n_rnn_cells)
if rnn_cell_type == "lstm":
cell = rnn_cell.LSTMCell(n_rnn_cells)
if rnn_cell_type == "lstm_peephole":
cell = rnn_cell.LSTMCell(n_rnn_cells, use_peepholes=True)
elif rnn_cell_type == "gru":
cell = rnn_cell.BasicLSTMCell(n_rnn_cells)
elif rnn_cell_type == "rnn":
cell = rnn_cell.BasicRNNCell(n_rnn_cells)
# dropout Wrapper
cell = tf.contrib.rnn.DropoutWrapper(cell=cell, output_keep_prob=dropout_keep_prob)
self.cell = cell = rnn_cell.MultiRNNCell([cell] * n_layers)
# tensor with class labels of dimension [batch_size x max_obs]
# defined as Variable to carry values to next iteration (not trainable must be declared explicitly)
self.state = state = cell.zero_state(batch_size, tf.float32)
# rnn_outputs: block of [batch_size x max_obs x rnn_size]
# data is padded with zeros after seq_length
outputs, last_states = tf.nn.dynamic_rnn(cell, self.X, initial_state=state, sequence_length=seq_lengths,
time_major=False)
self.outputs = outputs
self.last_states = last_states
with tf.name_scope('fc'):
# reshape outputs to: block of [batch_size * max_obs x rnn_size]
softmax_in = tf.reshape(outputs, [-1, n_rnn_cells])
softmax_w = tf.Variable(tf.truncated_normal([n_rnn_cells, n_classes], stddev=fc_w_stddev), name="W_softmax")
softmax_b = tf.Variable(tf.constant(fc_b_offset, shape=[n_classes]), name="b_softmax")
softmax_out = tf.matmul(softmax_in, softmax_w) + softmax_b
self.logits = logits = tf.reshape(softmax_out, [batch_size, -1, n_classes])
with tf.name_scope('train'):
# Define loss and optimizer
# create mask for cross entropies incases where seq_lengths < max_max_obs
# masking from http://stackoverflow.com/questions/34128104/tensorflow-creating-mask-of-varied-lengths
with tf.name_scope('mask'):
lengths_transposed = tf.expand_dims(seq_lengths, 1)
range = tf.range(0, max_obs, 1)
range_row = tf.expand_dims(range, 0)
self.mask = mask = tf.less(range_row, lengths_transposed)
self.cross_entropy_matrix = cross_entropy_matrix = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)
self.masked_cross_entropy_matrix = masked_cross_entropy_matrix = tf.where(mask, cross_entropy_matrix,
tf.zeros(mask.get_shape()))
# normalize with total number of observations
self.cross_entropy = cross_entropy = tf.reduce_sum(cross_entropy_matrix) / tf.cast(
tf.reduce_sum(seq_lengths), tf.float32)
tf.summary.scalar('cross_entropy', cross_entropy)
# grad_train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
self.train_op = tf.train.AdamOptimizer(learning_rate=adam_lr, beta1=adam_b1, beta2=adam_b2,
epsilon=adam_eps).minimize(cross_entropy)
# tf.summary.scalar('learning_rate', learning_rate)
with tf.name_scope('evaluation'):
self.probabilities = probs = tf.nn.softmax(logits, name="full_probability_matrix")
# Evaluate model
predicted = tf.argmax(logits, 2)
targets = tf.argmax(y_, 2)
correct_pred = tf.equal(predicted, targets)
masked_correct_pred = tf.logical_and(mask, correct_pred)
self.accuracy_op = accuracy = tf.reduce_sum(tf.cast(masked_correct_pred, tf.float32)) / tf.cast(
tf.reduce_sum(seq_lengths), tf.float32)
tf.summary.scalar('accuracy', accuracy)
self.probs_list = probs_list = tf.reshape(probs, (-1, n_classes))
predicted_list = tf.reshape(predicted, [-1])
targets_list = tf.reshape(targets, [-1])
mask_list = tf.reshape(mask, [-1])
one_hot_targets = tf.one_hot(targets_list, n_classes)
scores = tf.boolean_mask(probs_list, tf.cast(one_hot_targets, tf.bool))
# mask of individual number of observations
obs_list = tf.tile(tf.range(0, max_obs), [batch_size])
obs_matrix = tf.matmul(tf.expand_dims(obs_list, 1), tf.ones([1, n_classes], dtype=tf.int32))
probs_matrix_mask = probs_matrix_mask = tf.transpose(tf.reshape(tf.tile(mask_list, [n_classes]),[n_classes,-1]))
self.scores = tf.boolean_mask(probs_list, probs_matrix_mask)
self.targets = tf.boolean_mask(tf.reshape(y_, [-1,n_classes]), probs_matrix_mask)
self.obs = tf.boolean_mask(obs_list, mask_list)
# drop all values which are > seqlength
#self.scores = tf.boolean_mask(scores, mask_list)
#self.targets = tf.boolean_mask(targets_list, mask_list)
#self.obs = tf.boolean_mask(obs_list, mask_list)
self.confusion_matrix = confusion_matrix = tf.contrib.metrics.confusion_matrix(
tf.boolean_mask(targets_list, mask_list),
tf.boolean_mask(predicted_list, mask_list),
num_classes=n_classes)
confusion_matrix = tf.cast(confusion_matrix, tf.uint8)
confusion_matrix = tf.expand_dims(confusion_matrix, 2)
confusion_matrix = tf.expand_dims(confusion_matrix, 0)
tf.summary.image("confusion matrix", confusion_matrix, max_outputs=3)
logits_ = tf.cast(logits, tf.uint8)
logits_ = tf.expand_dims(logits_, 3)
tf.summary.image("logits", logits_, max_outputs=1)
probs_ = tf.cast(probs*255, tf.uint8)
probs_ = tf.expand_dims(probs_, 3)
tf.summary.image("probabilities", probs_, max_outputs=1)
targets_ = tf.cast(y_, tf.uint8)
targets_ = tf.expand_dims(targets_, 3)
tf.summary.image("targets", targets_, max_outputs=1)
# tf.add_to_collection(tf.GraphKeys.SUMMARIES, cm_im_summary)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
self.merge_summary_op = tf.summary.merge_all()
self.init_op = tf.global_variables_initializer()
def main():
# model = Model()
test()
def test():
    import os
    import pickle
    from sklearn.metrics import roc_curve  # assumed source of roc_curve used below
n_input = 9 * 6 + 1
n_classes = 20
batch_size = 50
max_obs = 26
n_classes = 38
confusion_matrix = np.zeros((n_classes, n_classes), dtype=int)
model = Model(n_input=n_input, n_classes=n_classes, n_layers=2, batch_size=batch_size,
adam_lr=1e-3, dropout_keep_prob=0.5, n_cell_per_input=4)
savedir = "tmp"
if not os.path.exists(savedir):
os.makedirs(savedir)
# dump pickle args for loading
pickle.dump(model.args, open(os.path.join(savedir, "args.pkl"), "wb"))
# dump human readable args
open(os.path.join(savedir, "args.txt"), "w").write(str(model.args))
init_from = "tmp"
if init_from is not None:
args = pickle.load(open(os.path.join(init_from, "args.pkl"), "rb"))
X = np.random.rand(batch_size, max_obs, n_input)
y = np.random.rand(batch_size, max_obs, n_classes)
seq_length = np.random.randint(16, max_obs, batch_size)
summaryWriter = tf.summary.FileWriter("tensorboard/test", graph=tf.get_default_graph())
with tf.Session() as sess:
sess.run([model.init_op])
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_length}
# training step
for i in range(1, 30):
train_op, cross_entropy, new_confusion_matrix = \
sess.run([model.train_op,
model.cross_entropy,
model.confusion_matrix], feed_dict=feed)
confusion_matrix += new_confusion_matrix
print(cross_entropy)
a,b = eval.class_evaluation(confusion_matrix)
scores, targets = sess.run([model.scores, tf.reshape(model.targets, [-1])], feed_dict=feed)
fpr, tpr, threshold = roc_curve(targets, scores, 0)
summary = sess.run(model.merge_summary_op, feed_dict=feed)
summaryWriter.add_summary(summary, i)
#buf = plots.plot_confusion_matrix(confusion_matrix, range(1, n_classes))
#image = tf.image.decode_png(buf.getvalue(), channels=4)
#image = tf.expand_dims(image, 0)
#summary_op = tf.image_summary("matplotlib conf matrix", image)
#summary = sess.run(summary_op)
# summaryWriter.add_summary(summary, i)
print("done")
if __name__ == '__main__':
main()
|
import wx
from wxPython.wx import *
import sys
import treedrawing
import traceback
import thread
import os
import webbrowser
import time
import util
ID_LAUNCH_SERVER = wx.NewId()
ID_CHROME = wx.NewId()
# needed for py2exe to work properly
#sys.stdout = open( os.path.expanduser("~/annotald.out.log.txt"), "w" )
sys.stderr = util.Blackhole() # open( os.path.expanduser("~/annotald.err.log.txt"), "w" )
sys.stdout = util.Blackhole()
#sys.stderr = None
class TaskBarApp(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, -1, title, size = (1, 1),
style=wx.FRAME_NO_TASKBAR|wx.NO_FULL_REPAINT_ON_RESIZE)
self.tbicon = wx.TaskBarIcon()
self.tbicon.SetIcon(wx.Icon('user-plain-red.png', wx.BITMAP_TYPE_PNG), "Annotald")
self.tbicon.Bind(wx.EVT_TASKBAR_RIGHT_UP,self.ShowMenu)
self.tbicon.Bind(wx.EVT_MENU, self.startServer, id=ID_LAUNCH_SERVER)
self.tbicon.Bind(wx.EVT_MENU, self.startChrome, id=ID_CHROME)
self.tbmenu = wx.Menu()
self.tbopen = self.tbmenu.Append(ID_LAUNCH_SERVER, 'Open File')
self.tbchrome = self.tbmenu.Append(ID_CHROME, 'Chrome to: localhost:8080')
# self.tbmenu.Append(ID_START_CHROME, '')
self.Show(True)
def ShowMenu(self, event):
self.tbicon.PopupMenu(self.tbmenu)
#self.startChrome()
#raise SystemExit(0)
def getPsdPath(self):
# Create an open file dialog
try:
dialog = wxFileDialog ( None, style = wxOPEN, message = 'Hey what\'s up, please pick a psd file for Annotald' )
# Show the dialog and get user input
if dialog.ShowModal() == wxID_OK:
print 'Selected:', dialog.GetPath()
path = dialog.GetPath()
dialog.Destroy()
return path
# The user did not select anything
else:
dialog.Destroy()
return None
except:
print('>>> traceback <<<')
traceback.print_exc()
print('>>> end of traceback <<<')
def startServer(self,event):
thread.start_new_thread(self.serverThread, ())
def serverThread(self):
#print('stuff')
filename = self.getPsdPath()
if filename is None:
pass
else:
args = [filename]
try:
# wait for cherrypy, TODO: check when server is running
time.sleep(4)
self.startChrome()
self.tbopen.Enable(False)
self.tbicon.SetIcon(wx.Icon('user-plain-blue.png', wx.BITMAP_TYPE_PNG), "Annotald")
treedrawing._main(args)
self.tbicon.SetIcon(wx.Icon('user-plain-red.png', wx.BITMAP_TYPE_PNG), "Annotald")
self.tbopen.Enable(True)
except:
print('>>> traceback <<<')
traceback.print_exc()
print('>>> end of traceback <<<')
def startChrome(self,event=None):
thread.start_new_thread(self.chromeThread, ())
def chromeThread(self):
os.system("start chrome localhost:8080")
class AnnotaldRunner(wx.App):
def OnInit(self):
frame = TaskBarApp(None, -1, ' ')
frame.Center(wx.BOTH)
frame.Show(False)
#args = ['../test.psd']
#treedrawing._main(args)
return True
def _main(argv=None):
if argv is None:
argv = sys.argv
app = AnnotaldRunner(0)
app.MainLoop()
if __name__ == '__main__':
_main()
|
import time
try:
from imagehash import imagehash
except ImportError:
imagehash = None
print "Missing imagehash lib"
import os
from image import Image
UNWANTED = r'C:\Users\j.gabes\Desktop\export\unwanted'
DELETED = r'C:\Users\j.gabes\Desktop\export\deleted'
# Mine are for debug, on other pc, use documents ones
if not os.path.exists(UNWANTED):
UNWANTED = os.path.expanduser('~/Documents/mangle_unwanted')
if not os.path.exists(UNWANTED):
os.mkdir(UNWANTED)
if not os.path.exists(DELETED):
DELETED = os.path.expanduser('~/Documents/mangle_deleted')
if not os.path.exists(DELETED):
os.mkdir(DELETED)
THRESHOLD = 6
HASH_SIZE = 10
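# The check below is a Hamming-distance comparison of 10x10 average hashes: an
# image whose hash differs from any unwanted reference by at most THRESHOLD (6)
# bits is treated as a match and discarded.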
class Similarity(object):
def __init__(self):
self._unwanted_hashes = {}
self._clean()
self._load()
self._sum_time = 0.0
self._nb_deleted = 0
def _clean(self):
if not os.path.exists(DELETED):
print "CANNOT SAVE DELETED"
return
print " * Cleaning deleted dir: %s" % DELETED
for f_path in os.listdir(DELETED):
full_path = os.path.join(DELETED, f_path)
os.unlink(full_path)
def _load(self):
t0 = time.time()
print " * Loading unwanted images: %s" % UNWANTED
for f_path in os.listdir(UNWANTED):
full_path = os.path.join(UNWANTED, f_path)
hash = imagehash.average_hash(Image.open(full_path), hash_size=HASH_SIZE)
self._unwanted_hashes[f_path] = hash
print " - %s hashed loaded in %.3fs" % (len(self._unwanted_hashes), time.time() - t0)
def add_deleted_image(self, f_path, image, diff, do_move):
self._nb_deleted += 1
print " * Image is unwanted (from %s), deleted=%s" % (f_path, self._nb_deleted)
save_deleted_path = os.path.join(DELETED, 'deleted_%s--diff_%s__%s.jpg' % (f_path, diff, self._nb_deleted))
if do_move:
image.save(save_deleted_path)
def is_valid_image(self, image, do_move=True):
if imagehash is None:
print "ERROR: cannot compare unwanted image"
return True
elapsed_at_start = int(self._sum_time)
t0 = time.time()
hash = imagehash.average_hash(image, hash_size=HASH_SIZE)
is_valid = True
for (f_path, ref_hash) in self._unwanted_hashes.iteritems():
diff = hash - ref_hash
if diff <= THRESHOLD:
self.add_deleted_image(f_path, image, diff, do_move=do_move)
is_valid = False
break
self._sum_time += (time.time() - t0)
if int(self._sum_time) != elapsed_at_start:
print "[UNWANTED:] Consume time= %s" % int(self._sum_time)
return is_valid
similarity = Similarity()
|
import re
import six
NON_ALPHA_NUMERIC_REGEX = re.compile('[^0-9a-zA-Z]+')
class ParserNameGenerator(object):
def __init__(self, parsers):
self._parsers = parsers
def is_free_parser_name(self, parser_name, black_list):
return (parser_name not in self._parsers) and (parser_name not in black_list)
def _create_name_from_words(self, words, black_list, words_count_in_name):
proposed_name = '_'.join(words[:words_count_in_name])
if self.is_free_parser_name(proposed_name, black_list):
return proposed_name
return self._find_free_by_number_appending(proposed_name, black_list)
def propose_parser_name(self, line, regex_str, black_list, words_count_in_name):
building_words = self._get_building_words(line, regex_str) or ['parser_name']
return self._create_name_from_words(building_words, black_list, words_count_in_name)
@classmethod
def _get_building_words(cls, line, pattern_str):
pattern = re.compile(pattern_str)
matcher = pattern.match(line)
if matcher is not None:
groups = matcher.groups()
for i in six.moves.range(len(groups)):
line = line.replace(groups[i], ' ')
return [re.sub(NON_ALPHA_NUMERIC_REGEX, ' ', word.lower()) for word in line.split()]
def _find_free_by_number_appending(self, proposed_name, black_list):
for i in six.moves.range(len(black_list) + 1):
            candidate = proposed_name + str(i + 1)
            if self.is_free_parser_name(candidate, black_list):
                return candidate
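# Example (a sketch with a hypothetical parser registry and log line):
#   gen = ParserNameGenerator({'existing_parser': object()})
#   gen.propose_parser_name('connection from 10.0.0.1 refused',
#                           r'connection from (\S+) refused', [], 2)
#   -> 'connection_from'   (the first two remaining words joined with '_')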
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.check_process_status import check_process_status
from titan_service import titan_service
import titan
class TitanServer(Script):
def get_component_name(self):
return "titan-server"
def install(self, env):
self.install_packages(env)
def configure(self, env, upgrade_type=None):
import params
env.set_params(params)
titan.titan(type='server', upgrade_type=upgrade_type)
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select("titan-server", params.version)
conf_select.select(params.stack_name, "titan", params.version)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
titan_service(action = 'start')
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
titan_service(action = 'stop')
def status(self, env, upgrade_type=None):
import params_server
check_process_status(params_server.titan_pid_file)
if __name__ == "__main__":
TitanServer().execute()
|
from getpass import getpass
class JenniferGmailSharedSettings(object):
def get_accounts(self):
try:
return self.settings['accounts']
except (KeyError, TypeError):
return []
@staticmethod
def initialize_settings(settings_template_dict):
"""
Custom initialize settings
"""
settings_template_dict['accounts'] = []
prompt = "> "
# A helper to get the ask
def ask_add_gmail():
return "Do you want to add {} Gmail Account (y/n)?".format("a" if len(settings_template_dict['accounts']) == 0 else "another")
print(ask_add_gmail())
answer = input(prompt)
while answer.lower() not in ['n', 'no']:
current_email = {}
print("Email Address:")
current_email['email'] = input(prompt)
print("Password:")
current_email['password'] = getpass(prompt)
print("Name (Personal, Work, etc):")
current_email['name'] = input(prompt).lower()
print("Receive notifications when you have new emails for this account? (y/n)")
current_email['notifyNewEmails'] = input(prompt).lower() in ['y', 'yes']
settings_template_dict['accounts'].append(current_email)
# Ask about more
print(ask_add_gmail())
answer = input(prompt)
if len(settings_template_dict['accounts']) >= 1:
print("Should I mark emails as read when I read them out loud to you? (y/n)")
answer = input(prompt)
settings_template_dict['markAsRead'] = answer.lower() in ['y', 'yes']
else:
settings_template_dict['enabled'] = False
return settings_template_dict
|
""" Exceptions for the Janrain API library. """
class JanrainApiException(Exception):
""" Base class for all Janrain API exceptions. """
pass
class JanrainCredentialsError(Exception):
""" Exception for credential errors (eg. Missing credentials) """
pass
class JanrainConfigError(KeyError):
""" Exception for credential configuration file errors """
def __init__(self, message=None, **kwargs):
try:
if message is None:
message = "Could not find key '{}' in '{}'." \
.format(kwargs['key'], kwargs['file'])
finally:
KeyError.__init__(self, message)
class JanrainInvalidUrlError(JanrainApiException):
""" Invalid URL. """
# DEPRECATED (bad application names include an error in the JSON response)
pass
class ApiResponseError(JanrainApiException):
""" An error response from the capture API. """
def __init__(self, code, error, error_description, response):
JanrainApiException.__init__(self, error_description)
self.code = code
self.error = error
self.response = response
|
#!/usr/bin/env python
"""Find approximate area under curve: Supports simpson, trapezoid, and
midpoint algorithms, n-degree single variable polynomials, and variable step size
"""
import ast
import getopt
import math
import sys
import logging
from dataclasses import dataclass
LOGGER = logging.getLogger()
LOGGER.setLevel(10)
USAGE = """ -p|--poly {DegreeN1:CoefficientM1, DegreeN2:CoefficientM2, ...}
-l|--lower <lower_bound> -u|--upper <upper_bound> -s|--step <step>
-a|--algorithm <simpson | trapezoid | midpoint>
defaults: step_size:1, lower_bound:0, upper_bound:10, algorithm:trapezoid
e.g. To evaluate the area of y=x^2 + 2x -2 from [1-50] with .1 width sums and the midpoint algorithm:
python area_under_curve.py --poly "{2:1, 1:2, 0:-2}" --lower 1 --upper 50 --step .1 --algorithm midpoint
"""
FULL_USAGE = USAGE
class Polynomial:
"""Single variable polynomial class supporting n degrees"""
def __init__(self, coefficient_dict):
""" The coefficient dict keys are the term orders, and the values are the coefficients
e.g
f(x) = 3x^2 would be expressed as {2:3}
f(x) = 9x^5 + 3 would be {5:9, 0:3}
"""
self.fractional_exponents = False
self.coefficient_dict = coefficient_dict
if any_negative(coefficient_dict):
raise ValueError("Only positive exponents supported")
self.fractional_exponents = any_non_int_numbers(coefficient_dict)
def format_term(self, degree, value):
"""string format a single term"""
value_formatted = str(value)
if value == 1:
value_formatted = ""
if value == 0:
return None
if degree == 0:
return str(value)
if degree == 1:
return f"{value_formatted}x"
return f"{value_formatted}x^{degree}"
def __str__(self):
"""string format the entire polynomial"""
terms = []
degrees = list(self.coefficient_dict)
degrees = sorted(degrees, reverse=True)
for degree in degrees:
term_formatted = (self.format_term(degree, self.coefficient_dict[degree]))
if term_formatted:
terms.append(term_formatted)
if not terms:
return "f(x)=0"
return f"f(x)={' + '.join(terms)}"
def evaluate(self, value):
"""Evaluate the polynomial at a given value"""
total = 0
for degree in self.coefficient_dict:
coefficient = self.coefficient_dict[degree]
if self.fractional_exponents and value < 0:
raise ValueError("Fractional exponents not supported for negative inputs.")
current_term = math.pow(value, degree)* coefficient
total += current_term
return total
class Bounds:
"""Range of values class"""
def __init__(self, lower_bound, upper_bound, step_size):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.step_size = step_size
if step_size <= 0:
raise ValueError("step size must be > 0")
if upper_bound <= lower_bound:
raise ValueError("invalid bounds")
self.full_range = self.float_range(lower_bound, upper_bound, step_size)
def __str__(self):
return f"Bounds: [{self.lower_bound} - {self.upper_bound}], step_size: {self.step_size}"
def float_range(self, lower_bound, upper_bound, step_size):
"""Create range of floats"""
float_list = []
current = lower_bound
float_list.append(current)
# Final number should be almost equal to upper bound.
# Adding fraction of step_size offset to account for rounding errors.
while current + step_size < (upper_bound + (step_size * .1)):
current += step_size
float_list.append(current)
return float_list
@dataclass
class Parameters:
"""Contains several groups of parameters"""
polynomial: Polynomial
bounds: Bounds
algorithm: str
@classmethod
def factory(cls, polynomial_coefficients, #pylint: disable=too-many-arguments
lower, upper, step, algorithm):
"""Create parameters object from polynomial, bounds, and algorithm parameters"""
bounds = Bounds(lower, upper, step)
polynomial = Polynomial(polynomial_coefficients)
return cls(polynomial, bounds, algorithm)
# Misc helper functions
def is_number(string):
"""Simple check to see if string is valid number"""
try:
float(string)
return True
except ValueError as err:
LOGGER.error(f"Error: {string} {str(err)}")
return False
def any_non_int_numbers(collection):
"""Returns true if any numbers in the collection are not integers"""
return any(map(lambda n: not isinstance(n, int), collection))
def any_negative(collection):
"""Returns true if any numbers in the collection are < 0"""
return any(map(lambda n: n < 0, collection))
def has_property(name):
"""Simple function property decorator"""
def wrap(func):
"""Wrapper function"""
setattr(func, name, True)
return func
return wrap
# Argument parsing
def parse_commandline_arguments(argv): # pylint: disable=too-many-return-statements,too-many-branches
"""Parse command line arguments and return a parameters
object with Bounds, Polynomial, and Algorithm
"""
#defaults
lower = 0
upper = 10
step_size = 1
algorithm = "trapezoid"
polynomial_coefficients = {}
try:
opts, _ = getopt.getopt(argv, "hl:u:s:a:p:",
["lower=", "upper=", "step=",
"algorithm=", "polynomial=", "help"])
non_numerical_params = ["-a", "--algorithm", "-p", "--polynomial", "-h", "--help"]
numerical_params = list(filter(lambda t: t[0] not in non_numerical_params, opts))
if any(map(lambda n: not is_number(n[1]), numerical_params)):
logging.error("Error in numerical arguments.")
return None
except getopt.GetoptError as err:
LOGGER.error(f"Option error: {str(err)}")
return None
for opt, arg in opts:
if opt in ("-h", "--help"):
LOGGER.info(FULL_USAGE)
sys.exit(0)
elif opt in ("-l", "--lower"):
lower = float(arg)
elif opt in ("-u", "--upper"):
upper = float(arg)
elif opt in ("-s", "--step"):
step_size = float(arg)
elif opt in ("-a", "--algorithm"):
algorithm = arg
elif opt in ("-p", "--polynomial"):
polynomial_coefficients = parse_polynomial_coefficients(arg)
if step_size <= 0:
LOGGER.error(f"step size must be > 0: {step_size}")
return None
if lower >= upper:
LOGGER.error(f"invalid bounds: {lower} {upper}")
return None
if (lower < 0 or upper < 0) and any_non_int_numbers(polynomial_coefficients):
LOGGER.error("Fractional exponents not supported for negative values.")
return None
algorithm_function = get_algorithm(algorithm)
if not algorithm_function:
LOGGER.error(f"Algorithm : {algorithm} not found!")
return None
if not polynomial_coefficients:
LOGGER.error("Polynomial not specified or invalid")
return None
if any_negative(polynomial_coefficients):
LOGGER.error("Only positive exponents supported")
return None
return Parameters.factory(polynomial_coefficients,
lower, upper, step_size, algorithm_function)
def parse_polynomial_coefficients(dict_literal):
"""Try to parse string into dictionary, return None on failure"""
coefficient_dict = {}
try:
coefficient_dict = ast.literal_eval(dict_literal)
except SyntaxError as errs:
LOGGER.error(f"Syntax Error parsing polynomial args: {dict_literal} {str(errs)}")
except ValueError as errv:
logging.error(f"Value Error parsing polynomial args: {dict_literal} {str(errv)}")
return None
if not isinstance(coefficient_dict, dict):
LOGGER.error(f"Malformed dictionary: {coefficient_dict}")
return None
return coefficient_dict
# Algorithms and utilities
@has_property("algorithm")
def midpoint(poly, lower, upper):
"""Calculate midpoint slice from two polynomial evaluations and step size"""
value = poly.evaluate((upper+lower)/2.0)
return (upper - lower) * value
@has_property("algorithm")
def trapezoid(poly, lower, upper):
"""Calculate trapezoid slice from two polynomial evaluations and step size"""
lower_value = poly.evaluate(lower)
upper_value = poly.evaluate(upper)
return (upper - lower) * ((lower_value + upper_value)/2.0)
@has_property("algorithm")
def simpson(poly, lower, upper):
"""Calculate parabola (Simpson) slice from two polynomial evaluations and step size"""
lower_value = poly.evaluate(lower)
upper_value = poly.evaluate(upper)
midpoint_value = poly.evaluate((lower+upper)/2.0)
return ((upper - lower) / 6.0) * (lower_value + 4 * midpoint_value + upper_value)
def get_algorithm(algorithm_name):
"""Get algorithm function by name by looking up in globals with the 'algorithm' attribute set"""
if algorithm_name in globals() and "algorithm" in dir(globals()[algorithm_name]):
return globals()[algorithm_name]
LOGGER.error(f"Algorithm {algorithm_name} not found or invalid!")
return None
# High-level implementation
def area_under_curve(poly, bounds, algorithm):
"""Finds the area under a polynomial between the specified bounds
    using slices of width bounds.step_size evaluated with the chosen algorithm.
"""
LOGGER.info(poly)
LOGGER.info(bounds)
LOGGER.info(f"Algorithm: {algorithm.__name__}")
range_upper_index = len(bounds.full_range) - 1
total_area = 0
for range_index, val in enumerate(bounds.full_range):
# Can't calculate trapezoid with only lower bound value, so we're done summing.
if range_index == range_upper_index:
return total_area
total_area += algorithm(poly, val, bounds.full_range[range_index + 1])
return total_area
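# Illustrative check, assuming the coefficient dict maps exponent -> coefficient:
# for poly = {2: 1} (i.e. x**2) on bounds 0..10, the simpson algorithm reproduces the
# exact integral 1000/3 ~= 333.33 for any step size, since Simpson's rule is exact for
# polynomials up to degree 3; midpoint and trapezoid converge to it as the step shrinks.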
# Entrypoints
def area_under_curve_argv(args):
"""Command-line entrypoint"""
parsed_parameters = parse_commandline_arguments(args[1:])
if not parsed_parameters:
print(FULL_USAGE)
sys.exit(2)
area = area_under_curve(parsed_parameters.polynomial,
parsed_parameters.bounds, parsed_parameters.algorithm)
print(f"Total Area ({parsed_parameters.algorithm.__name__}) = {area}")
if __name__ == '__main__':
FULL_USAGE = f'{__doc__}\nUsage: python {sys.argv[0]} {USAGE}'
area_under_curve_argv(sys.argv)
|
#! /usr/bin/env python
'''
Event level performance evaluation quantified in terms of Asimov significance.
It compares the performance of three old strategies (mHmatch, pThigh, pTjb)
with that of the BDT. The BDT performance is evaluated after excluding events
in which the highest BDT score is < threshold. For many threshold values, the
performance can be computed in parallel.
Output:
plot + pickled dictionary
Run:
python evaluate_event_performance.py --strategy root_tmva \
--sample_names SM_bkg_photon_jet SM_hh X275 X300 (...) --intervals 21
'''
import cPickle
import glob
import logging
import os
from itertools import izip
from joblib import Parallel, delayed
import numpy as np
import time
from tabulate import tabulate
from bbyy_jet_classifier import utils
from bbyy_jet_classifier.plotting import plot_asimov
def main(strategy, category, lower_bound, intervals):
logger = logging.getLogger("event_performance.main")
# -- test N(=intervals) various threshold values in the range [lower_bound, 1]
# where lower_bound = 0 for sklearn, = -1 for tmva
THRESHOLD = np.linspace(lower_bound, 1, intervals)
# -- print some info to the user to confirm the settings
logger.info("Strategy: {}".format(strategy))
logger.info("Category: {}".format(category))
logger.info("Threshold values: {}".format(THRESHOLD))
# -- find samples by looking at those contained in ./output/<category>/pickles/
base_directory = os.path.join("output", category, "pickles")
sample_names = os.listdir(base_directory)
bkg_sample_name = [ x for x in sample_names if "bkg" in x ][0]
logger.info("Processing data from {} samples...".format(len(sample_names)))
pickle_paths = sum(
[glob.glob(
os.path.join(
base_directory,
sample_name,
"{}_event_performance_dump.pkl".format(strategy)
)
) for sample_name in sample_names], []
# Q: why adding an empty list?
# A: sum( list_of_lists, [] ) is a quick way to flatten a list of lists without using libraries.
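        # e.g. sum([[1, 2], [3]], []) == [1, 2, 3]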
)
logger.info("Found {} datasets to load...".format(len(pickle_paths)))
# -- read in data from each pickle file & evaluate event-level performance
perf_dict = {}
for sample_name, path in zip(sample_names,pickle_paths):
start_time = time.time()
logger.info("Reading: {}...".format(path))
d = cPickle.load(open(path, "rb"))
perf_dict[sample_name] = eval_performance( # performance evaluation
d["yhat_test_ev"],
d["yhat_mHmatch_test_ev"],
d["yhat_pThigh_test_ev"],
d["yhat_pTjb_test_ev"],
d["y_event"],
d["mjb_event"],
d["w_test"],
THRESHOLD=THRESHOLD
)
logger.info("Done in {:.2f} seconds".format(time.time()-start_time))
# TO-DO: it would be nicer if the dictionary was indexed by threshold, instead of containing a 2d list of thresholds and asimovs
headers = sorted([s for s in sample_names if s != bkg_sample_name])
if hasattr(THRESHOLD, "__iter__"):
asimov_dict = {
_sample_name: {
strategy: map(np.array, [THRESHOLD, [asimov(s, b) for s, b in zip(perf_dict[_sample_name][strategy], perf_dict[bkg_sample_name][strategy])]])
for strategy in ["BDT", "mHmatch", "pThigh", "pTjb"]
}
for _sample_name in headers#[ x for x in sample_names if x != bkg_sample_name ]
}
else:
asimov_dict = {
_sample_name: {
strategy: map(np.array, [[THRESHOLD], [asimov(s, b) for s, b in zip(perf_dict[_sample_name][strategy], perf_dict[bkg_sample_name][strategy])]])
for strategy in ["BDT", "mHmatch", "pThigh", "pTjb"]
}
for _sample_name in headers#[ x for x in sample_names if x != bkg_sample_name ]
}
# -- Write dictionary of Asimov significances to disk
utils.ensure_directory(os.path.join("output", "pickles"))
with open(os.path.join("output", "pickles", "multi_proc_{}_{}.pkl".format(strategy, category)), "wb") as f:
cPickle.dump(asimov_dict, f)
# -- Plot Z_BDT/Z_old for different threshold values
plot_asimov.bdt_old_ratio(asimov_dict, category, strategy, 'mHmatch', lower_bound)
# -- Print Asimov significance for different strategies and different samples in tabular form
# Each table corresponds to a different threshold value
for threshold in THRESHOLD:
#print '\nAsimov significance for threshold = {}:\n{}'.format(threshold, tabulate(
logger.info('\nAsimov significance for threshold = {}:\n{}'.format(threshold, tabulate(
[
[strategy] + [
asimov_dict[_class][strategy][1][np.isclose(asimov_dict[_class][strategy][0], threshold)]
for _class in headers
]
for strategy in ['BDT', 'mHmatch', 'pTjb', 'pThigh']
],
headers=[''] + headers,
floatfmt=".5f"
))
)
def asimov(s, b):
"""
Definition:
-----------
Calculates signal to background sensitivity according to the Asimov formula
Args:
-----
s: float, the number of jet pairs properly classified as "correct" that fall in the m_jb window
b: float, the number of jet pairs mistakenly classified as "correct" that fall in the m_jb window
Returns:
--------
The result of the Asimov formula given s and b
"""
import math
return math.sqrt(2 * ((s + b) * math.log(1 + (s / b)) - s))
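# Quick numerical check: asimov(10, 100) = sqrt(2 * (110 * log(1.1) - 10)) ~= 0.98,
# close to the naive s / sqrt(b) = 1.0 approximation in the s << b limit.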
def eval_performance(yhat_test_ev, yhat_mHmatch_test_ev, yhat_pThigh_test_ev, yhat_pTjb_test_ev, y_event, mjb_event, w_test, THRESHOLD):
"""
Definition:
-----------
Log event-level performance outputs as info
Args:
-----
yhat_test_ev: event level numpy array containing the predictions from the BDT for each jet in the event
yhat_mHmatch_test_ev: event level numpy array containing the predictions from mHmatch for each jet in the event
yhat_pThigh_test_ev: event level numpy array containing the predictions from pThigh for each jet in the event
yhat_pTjb_test_ev: event level numpy array containing the predictions from pTjb for each jet in the event
y_event: event level numpy array containing the truth labels for each jet in the event
mjb_event: event level numpy array containing the values of m_jb for each jet in the event
    w_test: event level numpy array containing the event weights
    THRESHOLD: a float or an iterable of floats used as cuts on the highest classifier score
"""
logger = logging.getLogger("eval_performance")
logger.info("BDT: Number of correctly classified events = {:5} out of {} events having a correct pair".format(*count_correct_total(yhat_test_ev, y_event)))
logger.info("mHmatch: Number of correctly classified events = {:5} out of {} events having a correct pair".format(*count_correct_total(yhat_mHmatch_test_ev, y_event)))
logger.info("pThigh: Number of correctly classified events = {:5} out of {} events having a correct pair".format(*count_correct_total(yhat_pThigh_test_ev, y_event)))
logger.info("pTjb: Number of correctly classified events = {:5} out of {} events having a correct pair".format(*count_correct_total(yhat_pTjb_test_ev, y_event)))
logger.info("Number of events without any correct pair = {}".format(sum([sum(y_event[ev]) == 0 for ev in xrange(len(y_event))])))
# check whether selected pair has m_jb in mass window for truly correct and truly incorrect pairs
    # -- this will make little sense for SM_merged because lots of events are bkg and shouldn't fall in the m_jb window, but we can't tell them
# -- apart without mcChannel number --> use unmerged samples in that case
# 3 categories: truly correct pair present and got it right, truly correct pair present and got it wrong, no correct pair present
# -- check this for all 3 strategies (BDT, mHmatch, pThigh)
# 1. was there a correct pair?
correct_present_truth = np.array([sum(ev) == 1 for ev in y_event])
# ^ this is strategy agnostic, can be calculated outside
in_BDT = in_mjb_window(mjb_event, y_event, yhat_test_ev, w_test, correct_present_truth, "BDT", THRESHOLD)
in_mHmatch = in_mjb_window(mjb_event, y_event, yhat_mHmatch_test_ev, w_test, correct_present_truth, "mHmatch", THRESHOLD)
in_pThigh = in_mjb_window(mjb_event, y_event, yhat_pThigh_test_ev, w_test, correct_present_truth, "pThigh", THRESHOLD)
in_pTjb = in_mjb_window(mjb_event, y_event, yhat_pTjb_test_ev, w_test, correct_present_truth, "pTjb", THRESHOLD)
return {"BDT": in_BDT, "mHmatch": in_mHmatch, "pThigh": in_pThigh, "pTjb" : in_pTjb}
def count_correct_total(yhat, y):
"""
Definition:
-----------
Quantify the number of events in which the correct jet pair was assigned the highest classifier score
Args:
-----
yhat: event level numpy array containing the predictions for each jet in the event
y: event level numpy array containing the truth labels for each jet in the event
Returns:
--------
n_correct_classifier: int, number of events in which the correct jet pair was assigned the highest classifier score
n_correct_truth: int, total number of events with a "correct" jet pair
"""
# -- find how many times we find the correct pair in all events that do have a correct pair
# correct_classifier = a truly correct pair existed (sum(y[ev]) == 1) and we got it right (np.argmax(yhat[ev]) == np.argmax(y[ev]))
n_correct_classifier = sum([np.argmax(yhat[ev]) == np.argmax(y[ev]) for ev in xrange(len(y)) if sum(y[ev]) == 1])
# correct_truth = a truly correct pair exists
n_correct_truth = sum([sum(y[ev]) == 1 for ev in xrange(len(y))])
return n_correct_classifier, n_correct_truth
def _weightedsum_eventsinmjb(weights_in_mjb, yhat, slicer, thresh):
    """Sum of event weights, over events in the given slice, for which the
    highest-scoring pair passes the threshold and falls in the m_jb window."""
    sliced_weights = weights_in_mjb[slicer]
    sliced_yhat = np.array(yhat)[slicer]
    return np.sum(w[np.argmax(y)] for w, y in izip(sliced_weights, sliced_yhat) if max(y) >= thresh)
def in_mjb_window(mjb_event, y_event, yhat_test_ev, w_test, correct_present_truth, strategy, THRESHOLD):
logger = logging.getLogger("mjb_window - " + strategy)
# -- if there was a correct pair and we got it right, how many times does it fall into m_jb? how many times does it not?
# -- if there was a correct pair and we got it wrong, how many times does it fall into m_jb? how many times does it not?
# -- if there was no correct pair, how many times does the pair we picked fall into m_jb? how many times does it not?
# 1. was there a correct pair?
# correct_present_truth
# 2. does the bdt agree with the truth label? aka got it right?
agree_with_truth = np.array([(np.argmax(yhat) == np.argmax(y)) for yhat, y in izip(yhat_test_ev, y_event)])
# 3. truly correct present and selected (A)
correct_truth_correct_BDT = np.array(np.logical_and(correct_present_truth, agree_with_truth))
# 4. truly correct present but selected other pair (B)
correct_truth_incorrect_BDT = np.array(np.logical_and(correct_present_truth, -agree_with_truth))
# 5. no correct jet present = - correct_present_truth (C)
# -- look at mjb for these 3 cases:
# -- boolean
in_mjb = [np.logical_and(mjb_event[ev] < 135, mjb_event[ev] > 95) for ev in xrange(len(mjb_event))]
# -- weights * boolean
weights_in_mjb = np.array([_w * _m for _w, _m in izip(w_test, in_mjb)])
if hasattr(THRESHOLD, "__iter__"):
# num_inX are lists in this scenario
num_inA = Parallel(n_jobs=20, verbose=True)(delayed(_weightedsum_eventsinmjb)(weights_in_mjb, yhat_test_ev, correct_truth_correct_BDT, thresh) for thresh in THRESHOLD)
num_inB = Parallel(n_jobs=20, verbose=True)(delayed(_weightedsum_eventsinmjb)(weights_in_mjb, yhat_test_ev, correct_truth_incorrect_BDT, thresh) for thresh in THRESHOLD)
num_inC = Parallel(n_jobs=20, verbose=True)(delayed(_weightedsum_eventsinmjb)(weights_in_mjb, yhat_test_ev, -correct_present_truth, thresh) for thresh in THRESHOLD)
return np.array([num_inA, num_inB, num_inC]).sum(axis=0)
else:
num_inA = _weightedsum_eventsinmjb(weights_in_mjb, yhat_test_ev, correct_truth_correct_BDT, thresh=THRESHOLD)
num_inB = _weightedsum_eventsinmjb(weights_in_mjb, yhat_test_ev, correct_truth_incorrect_BDT, thresh=THRESHOLD)
num_inC = _weightedsum_eventsinmjb(weights_in_mjb, yhat_test_ev, -correct_present_truth, thresh=THRESHOLD)
logger.info("Total number of events with a correct pair present and identified = {}".format(sum((w * c) for w, c in izip(w_test, correct_truth_correct_BDT))))
logger.info("Of these events, {} fall in m_jb window".format(num_inA))
logger.info("Total number of events with a correct pair present but a different one selected = {}".format(sum((w * c) for w, c in izip(w_test, correct_truth_incorrect_BDT))))
logger.info("Of these events, {} fall in m_jb window".format(num_inB))
logger.info("Total number of events without a correct pair = {}".format(sum((w * c) for w, c in izip(w_test, -correct_present_truth))))
logger.info("Of these events, out of the ones selected by the classifier, {} fall in m_jb window".format(num_inC))
logger.info("Total number of events in the m_jb window = {}".format(num_inA + num_inB + num_inC))
return [num_inA + num_inB + num_inC]
if __name__ == "__main__":
import sys
import argparse
utils.configure_logging()
parser = argparse.ArgumentParser(description="Check event level performance")
#parser.add_argument("--sample_names", help="list of names of samples to evaluate", type=str, nargs="+", default=[])
parser.add_argument("--strategy", type=str, default="skl_BDT",
help="Strategy to evaluate. Options are: root_tmva, skl_BDT. Default: skl_BDT")
parser.add_argument("--category", type=str, default="low_mass",
help="Trained classifier to use for event-level evaluation. Examples are: low_mass, high_mass. Default: low_mass")
parser.add_argument("--intervals", type=int, default=21,
help="Number of threshold values to test. Default: 21")
args = parser.parse_args()
if args.strategy == 'skl_BDT':
lower_bound = 0
elif args.strategy == 'root_tmva':
lower_bound = -1
else:
raise ValueError("Unknown strategy. The only options are root_tmva and skl_BDT.")
sys.exit(main(args.strategy, args.category, lower_bound, args.intervals))
|
#!/usr/bin/env python
#######################################################
#
# @Author: Hardik Mehta <[email protected]>
#
# @version: 0.1 basic script
#
########################################################
import sys, urllib, codecs
from xml.dom import minidom, Node
class WeatherInfo:
def __init__(self,location="Munich,Germany"):
#self._urlPart = "http://www.google.com/ig/api?weather="
#self.url = "http://www.google.de/ig/api?weather=" + location
self._urlPart = "http://www.google.com/ig/api?"
self.general = {"location": "N/A", "unit":"Metric","city":"N/A"}
self.current_condition = {"condition":"N/A","temp_c":"N/A","temp_f":"N/A","humidity":"N/A","wind_condition":"N/A"}
self.forecast_conditions = [{"day_of_week":"N/A","low":"N/A","high":"N/A","condition":"N/A"}]
def parse(self,location="Munich,Germany"):
#strUrl = self._urlPart + location
strUrl = self._urlPart + urllib.urlencode({'weather' : location})
#+'&' + urllib.urlencode({'hl':'it'})
print strUrl
try:
sock = urllib.urlopen(strUrl)
except IOError:
self.general["location"] = "Connection Error"
return
#encoding = sock.headers['Content-type'].split('charset=')[1]
#print encoding;
#strUtf = strResponse.decode(encoding).encode('utf-8')
#doc = minidom.parseString(strUtf)
doc = minidom.parse(sock)
nodes = doc.getElementsByTagName("forecast_information")
# fetch general info
if len(nodes) <> 0:
node = nodes[0]
self.general["location"] = (node.getElementsByTagName("postal_code")[0]).getAttribute("data")
self.general["unit"] = (node.getElementsByTagName("unit_system")[0]).getAttribute("data")
self.general["city"] = (node.getElementsByTagName("city")[0]).getAttribute("data")
self.general["city"] = (node.getElementsByTagName("city")[0]).getAttribute("data")
# fetch current conditions
nodes = doc.getElementsByTagName("current_conditions")
if len(nodes) <> 0:
node = nodes[0]
for key in self.current_condition.keys():
self.current_condition[key] = (node.getElementsByTagName(key)[0]).getAttribute("data")
# fetch forecast conditions
fc = doc.getElementsByTagName("forecast_conditions")
if len(fc) <> 0:
fc_conditions = list()
for elem in fc:
condition = dict()
for key in self.forecast_conditions[0].keys():
condition[key] = (elem.getElementsByTagName(key)[0]).getAttribute("data")
fc_conditions.append(condition)
self.forecast_conditions = fc_conditions
def show(self):
for k, v in self.general.iteritems():
print k, v
print "\n"
for k, v in self.current_condition.iteritems():
print k, v
print "\n"
for fc in self.forecast_conditions:
for k, v in fc.iteritems():
print k, v
print ""
if __name__ == "__main__":
wi = WeatherInfo()
wi.show()
wi.parse();
print("-------------")
wi.show()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
default: present
durable:
description:
- whether queue is durable or not
type: bool
default: 'yes'
auto_delete:
description:
- if the queue should delete itself after all queues/queues unbound from it
type: bool
default: 'no'
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
max_priority:
description:
- Maximum number of priority levels for the queue to support.
- If not set, the queue will not support message priorities.
- Larger numbers indicate higher priority.
version_added: "2.4"
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
default: {}
extends_documentation_fragment:
- rabbitmq
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
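
# Create a queue with a per-message TTL and a dead-letter exchange
# (illustrative values; the dead-letter exchange is assumed to exist already)
- rabbitmq_queue:
    name: myDlxQueue
    message_ttl: 60000
    dead_letter_exchange: my-dlx
    dead_letter_routing_key: my-dlx-key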
'''
import json
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
def main():
argument_spec = rabbitmq_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
durable=dict(default=True, type='bool'),
auto_delete=dict(default=False, type='bool'),
message_ttl=dict(default=None, type='int'),
auto_expires=dict(default=None, type='int'),
max_length=dict(default=None, type='int'),
dead_letter_exchange=dict(default=None, type='str'),
dead_letter_routing_key=dict(default=None, type='str'),
arguments=dict(default=dict(), type='dict'),
max_priority=dict(default=None, type='int')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
url = "%s://%s:%s/api/queues/%s/%s" % (
module.params['login_protocol'],
module.params['login_host'],
module.params['login_port'],
urllib_parse.quote(module.params['vhost'], ''),
module.params['name']
)
if not HAS_REQUESTS:
module.fail_json(msg="requests library is required for this module. To install, use `pip install requests`")
result = dict(changed=False, name=module.params['name'])
# Check if queue already exists
r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
verify=module.params['cacert'], cert=(module.params['cert'], module.params['key']))
if r.status_code == 200:
queue_exists = True
response = r.json()
elif r.status_code == 404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg="Invalid response from RESTAPI when trying to check if queue exists",
details=r.text
)
if module.params['state'] == 'present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code == 200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
) and
(
('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
) and
(
('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
) and
(
('x-dead-letter-exchange' in response['arguments'] and
response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
) and
(
('x-dead-letter-routing-key' in response['arguments'] and
response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
) and
(
('x-max-priority' in response['arguments'] and
response['arguments']['x-max-priority'] == module.params['max_priority']) or
('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
)
):
module.fail_json(
msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k, v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key',
'max_priority': 'x-max-priority'
}.items():
if module.params[k] is not None:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
result['changed'] = change_required
result['details'] = response
result['arguments'] = module.params['arguments']
module.exit_json(**result)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth=(module.params['login_user'], module.params['login_password']),
headers={"content-type": "application/json"},
data=json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
}),
verify=module.params['cacert'],
cert=(module.params['cert'], module.params['key'])
)
elif module.params['state'] == 'absent':
r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
verify=module.params['cacert'], cert=(module.params['cert'], module.params['key']))
# RabbitMQ 3.6.7 changed this response code from 204 to 201
if r.status_code == 204 or r.status_code == 201:
result['changed'] = True
module.exit_json(**result)
else:
module.fail_json(
msg="Error creating queue",
status=r.status_code,
details=r.text
)
else:
module.exit_json(
changed=False,
name=module.params['name']
)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import os
import tempfile
import re
from jsondb.backends.sqlite3_backend import Sqlite3Backend
from jsondb.backends.url import URL
from jsondb.util import IS_WINDOWS
drivers = {
'sqlite3' : Sqlite3Backend,
}
class Error(Exception):
pass
class NonAvailableSchemeError(Error):
pass
def create(connstr, *args, **kws):
if not connstr:
# assume sqlite3
fd, path = tempfile.mkstemp(suffix='.jsondb')
connstr = 'sqlite3://%s' % (os.path.abspath(os.path.normpath(path)))
if IS_WINDOWS:
connstr = 'sqlite3:///%s' % (os.path.abspath(os.path.normpath(path)))
if IS_WINDOWS and not re.match(r'^[^:/]+://.*$', connstr):
connstr = 'sqlite3:///%s' % (os.path.abspath(os.path.normpath(connstr)))
url = URL.parse(connstr)
if not url.driver:
url.driver = 'sqlite3'
name = url.driver
cls = drivers.get(name.lower(), None)
if not cls:
raise NonAvailableSchemeError(name)
return cls(url=url, *args, **kws) if cls else None
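# Illustrative usage (paths are hypothetical): create('sqlite3:///tmp/data.jsondb')
# returns a Sqlite3Backend bound to that file, while create('') creates a temporary
# .jsondb file and falls back to the sqlite3 driver.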
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.version as dist_version
import re
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH) Should we remove the explicit include of the ovs plugin here
from neutron.plugins.openvswitch.common import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
OPTS = [
cfg.IntOpt('ovs_vsctl_timeout',
default=DEFAULT_OVS_VSCTL_TIMEOUT,
help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name=" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if not check_error:
ctxt.reraise = False
def add_bridge(self, bridge_name):
self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 2\n' in str(e):
ctxt.reraise = False
return False
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 1\n' in str(e):
ctxt.reraise = False
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def set_controller(self, controller_names):
vsctl_command = ['--', 'set-controller', self.br_name]
vsctl_command.extend(controller_names)
self.run_vsctl(vsctl_command, check_error=True)
def del_controller(self):
self.run_vsctl(['--', 'del-controller', self.br_name],
check_error=True)
def get_controller(self):
res = self.run_vsctl(['--', 'get-controller', self.br_name],
check_error=True)
if res:
return res.strip().split('\n')
return res
def set_protocols(self, protocols):
self.run_vsctl(['--', 'set', 'bridge', self.br_name,
"protocols=%s" % protocols],
check_error=True)
def create(self):
self.add_bridge(self.br_name)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self):
self.destroy()
self.create()
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
return self.db_get_val("Interface", port_name, "ofport")
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def add_flow(self, **kwargs):
flow_str = _build_flow_expr_str(kwargs, 'add')
if self.defer_apply_flows:
self.deferred_flows['add'] += flow_str + '\n'
else:
self.run_ofctl("add-flow", [flow_str])
def mod_flow(self, **kwargs):
flow_str = _build_flow_expr_str(kwargs, 'mod')
if self.defer_apply_flows:
self.deferred_flows['mod'] += flow_str + '\n'
else:
self.run_ofctl("mod-flows", [flow_str])
def delete_flows(self, **kwargs):
flow_expr_str = _build_flow_expr_str(kwargs, 'del')
if self.defer_apply_flows:
self.deferred_flows['del'] += flow_expr_str + '\n'
else:
self.run_ofctl("del-flows", [flow_expr_str])
def defer_apply_on(self):
LOG.debug(_('defer_apply_on'))
self.defer_apply_flows = True
def defer_apply_off(self):
LOG.debug(_('defer_apply_off'))
for action, flows in self.deferred_flows.items():
if flows:
LOG.debug(_('Applying following deferred flows '
'to bridge %s'), self.br_name)
for line in flows.splitlines():
LOG.debug(_('%(action)s: %(flow)s'),
{'action': action, 'flow': line})
self.run_ofctl('%s-flows' % action, ['-'], flows)
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == p_const.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
return self.get_port_ofport(port_name)
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
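        # e.g. '{csum="true", tx_bytes=128}' -> {'csum': 'true', 'tx_bytes': '128'}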
list = full_str.strip("{}").split(", ")
ret = {}
for e in list:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids",
check_error=True)
ofport = self.db_get_val("Interface", name, "ofport",
check_error=True)
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids,ofport',
'list', 'Interface']
result = self.run_vsctl(args, check_error=True)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
# Do not consider VIFs which aren't yet ready
# This can happen when ofport values are either [] or ["set", []]
# We will therefore consider only integer values for ofport
ofport = row[2]
try:
int_ofport = int(ofport)
except (ValueError, TypeError):
LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
else:
if int_ofport > 0:
if ("iface-id" in external_ids and
"attached-mac" in external_ids):
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not
# automatically synced to OVS from XAPI, we grab it
# from XAPI directly
iface_id = self.get_xapi_iface_id(
external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
else:
LOG.warn(_("Found failed openvswitch port: %s"), row)
return edge_ports
def get_port_tag_dict(self):
"""Get a dict of port names and associated vlan tags.
e.g. the returned dict is of the following form::
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
The TAG ID is only available in the "Port" table and is not available
in the "Interface" table queried by the get_vif_port_set() method.
"""
port_names = self.get_port_name_list()
args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port']
result = self.run_vsctl(args, check_error=True)
port_tag_dict = {}
if not result:
return port_tag_dict
for name, tag in jsonutils.loads(result)['data']:
if name not in port_names:
continue
# 'tag' can be [u'set', []] or an integer
if isinstance(tag, list):
tag = tag[1]
port_tag_dict[name] = tag
return port_tag_dict
def get_vif_port_by_id(self, port_id):
args = ['--format=json', '--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
json_result = jsonutils.loads(result)
try:
# Retrieve the indexes of the columns we're looking for
headings = json_result['headings']
ext_ids_idx = headings.index('external_ids')
name_idx = headings.index('name')
ofport_idx = headings.index('ofport')
# If data attribute is missing or empty the line below will raise
            # an exception which will be captured in this block.
            # We won't deal with the possibility of ovs-vsctl returning multiple
# rows since the interface identifier is unique
data = json_result['data'][0]
port_name = data[name_idx]
switch = get_bridge_for_iface(self.root_helper, port_name)
if switch != self.br_name:
LOG.info(_("Port: %(port_name)s is on %(switch)s,"
" not on %(br_name)s"), {'port_name': port_name,
'switch': switch,
'br_name': self.br_name})
return
ofport = data[ofport_idx]
# ofport must be integer otherwise return None
if not isinstance(ofport, int) or ofport == -1:
LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a "
"positive integer"), {'ofport': ofport,
'vif': port_id})
return
# Find VIF's mac address in external ids
ext_id_dict = dict((item[0], item[1]) for item in
data[ext_ids_idx][1])
vif_mac = ext_id_dict['attached-mac']
return VifPort(port_name, ofport, port_id, vif_mac, self)
except Exception as e:
LOG.warn(_("Unable to parse interface details. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
def get_installed_ovs_usr_version(root_helper):
args = ["ovs-vsctl", "--version"]
try:
cmd = utils.execute(args, root_helper=root_helper)
ver = re.findall("\d+\.\d+", cmd)[0]
return ver
except Exception:
LOG.exception(_("Unable to retrieve OVS userspace version."))
def get_installed_ovs_klm_version():
args = ["modinfo", "openvswitch"]
try:
cmd = utils.execute(args)
for line in cmd.split('\n'):
if 'version: ' in line and not 'srcversion' in line:
ver = re.findall("\d+\.\d+", line)
return ver[0]
except Exception:
LOG.exception(_("Unable to retrieve OVS kernel module version."))
def get_installed_kernel_version():
args = ["uname", "-r"]
try:
cmd = utils.execute(args)
for line in cmd.split('\n'):
return str(re.findall("\d+\.\d+\.\d+", line))
except Exception:
LOG.exception(_("Unable to retrieve installed Linux kernel version."))
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
def _compare_installed_and_required_version(
installed_kernel_version, installed_version, required_version,
check_type, version_type):
if installed_kernel_version:
if dist_version.StrictVersion(
installed_kernel_version) >= dist_version.StrictVersion(
constants.MINIMUM_LINUX_KERNEL_OVS_VXLAN):
return
if installed_version:
if dist_version.StrictVersion(
installed_version) < dist_version.StrictVersion(
required_version):
msg = (_('Failed %(ctype)s version check for Open '
'vSwitch with %(vtype)s support. To use '
'%(vtype)s tunnels with OVS, please ensure '
'the OVS version is %(required)s or newer!') %
{'ctype': check_type, 'vtype': version_type,
'required': required_version})
raise SystemError(msg)
else:
msg = (_('Unable to determine %(ctype)s version for Open '
'vSwitch with %(vtype)s support. To use '
'%(vtype)s tunnels with OVS, please ensure '
'that the version is %(required)s or newer!') %
{'ctype': check_type, 'vtype': version_type,
'required': required_version})
raise SystemError(msg)
def check_ovs_vxlan_version(root_helper):
min_required_version = constants.MINIMUM_OVS_VXLAN_VERSION
installed_klm_version = get_installed_ovs_klm_version()
installed_kernel_version = get_installed_kernel_version()
installed_usr_version = get_installed_ovs_usr_version(root_helper)
LOG.debug(_("Checking OVS version for VXLAN support "
"installed klm version is %(klm)s, installed Linux version is "
"%(kernel)s, installed user version is %(usr)s ") %
{'klm': installed_klm_version,
'kernel': installed_kernel_version,
'usr': installed_usr_version})
# First check the userspace version
_compare_installed_and_required_version(None, installed_usr_version,
min_required_version,
'userspace', 'VXLAN')
# Now check the kernel version
_compare_installed_and_required_version(installed_kernel_version,
installed_klm_version,
min_required_version,
'kernel', 'VXLAN')
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = _("Cannot match priority on flow deletion or modification")
raise exceptions.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise exceptions.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in flow_dict.iteritems():
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
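# Illustrative result (the order of leftover match fields follows dict iteration order):
#   _build_flow_expr_str({'priority': 2, 'in_port': 1, 'actions': 'normal'}, 'add')
#   -> 'hard_timeout=0,idle_timeout=0,priority=2,in_port=1,actions=normal'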
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# btc, usd and VEB values
#
# Copyright 2014 David Rodriguez <davidrodriguez at gmail dot com>
# the API is courtesy of the people at bitven.com @diariobitcoin and dolartoday
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# import the modules to be used
import urllib2
import simplejson
import json
import android,sys
droid=android.Android()
#from pprint import pprint
#import requests
#from pyquery import PyQuery
rv=''
rk=''
mb=''
ecu1=''
def btcval():
    # this is the URL where the JSON data lives
url = "http://api.bitven.com/prices"
    # here we use urllib2 to open the page
resp = urllib2.Request(url)
opener = urllib2.build_opener()
    # here we fetch the data
data = opener.open(resp)
    # at this point we decode the data and can pick out what we need
result = simplejson.load(data)
    # split out the keys
rk=result.keys()
    # split out the values
rv=result.values()
    # multiply the values to get the bolivar amount
mb= rv[1]*rv[0]
    # format the requested result for display
print '*******************************************'
print 'Tasas del mercado actual de divisas '
print '*******************************************'
print 'Bitcoin | Dolartoday | Bolivar '
print '*******************************************'
print str(rv[0]),' ฿ ' ' | ' + str(rv[1]),' $. ' ' | '+str(mb), ' Bs. '
print '*******************************************'
rvb = rv[0]
rvus = rv[1]
    # show the rates in an Android dialog
droid.dialogCreateAlert(' Btc, usd, vef ',' ฿ %s, %s, $ %s Bs. ' %(rvb, rvus, mb) )
    # create the accept button
droid.dialogSetPositiveButtonText('Aceptar')
    # show the dialog window
droid.dialogShow()
return 0
btcval()
|
# Copyright 2004-2005 Joe Wreschnig, Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import ctypes
from quodlibet.util import load_library
from ._audio import AudioFile, translate_errors
extensions = [
'.669', '.amf', '.ams', '.dsm', '.far', '.it', '.med', '.mod', '.mt2',
'.mtm', '.okt', '.s3m', '.stm', '.ult', '.gdm', '.xm']
try:
_modplug = load_library(
["libmodplug.so.1", "libmodplug.so.0", "libmodplug-1.dll"])[0]
except OSError:
extensions = []
else:
_modplug.ModPlug_GetName.argtypes = [ctypes.c_void_p]
_modplug.ModPlug_GetName.restype = ctypes.c_char_p
_modplug.ModPlug_Load.argtypes = [ctypes.c_void_p, ctypes.c_int]
_modplug.ModPlug_Load.restype = ctypes.c_void_p
_modplug.ModPlug_GetLength.argtypes = [ctypes.c_void_p]
_modplug.ModPlug_GetLength.restype = ctypes.c_int
_modplug.ModPlug_Unload.argtypes = [ctypes.c_void_p]
_modplug.ModPlug_Unload.restype = None
class ModFile(AudioFile):
format = "MOD/XM/IT"
def __init__(self, filename):
with translate_errors():
data = open(filename, "rb").read()
f = _modplug.ModPlug_Load(data, len(data))
if not f:
raise IOError("%r not a valid MOD file" % filename)
self["~#length"] = _modplug.ModPlug_GetLength(f) // 1000
title = _modplug.ModPlug_GetName(f) or os.path.basename(filename)
try:
self["title"] = title.decode('utf-8')
except UnicodeError:
self["title"] = title.decode("iso-8859-1")
_modplug.ModPlug_Unload(f)
self.sanitize(filename)
def write(self):
pass
def reload(self, *args):
artist = self.get("artist")
super(ModFile, self).reload(*args)
if artist is not None:
self.setdefault("artist", artist)
def can_change(self, k=None):
if k is None:
return ["artist"]
else:
return k == "artist"
loader = ModFile
types = [ModFile]
|
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Keep all the constants used in the glideinWMS
#
# Author: Igor Sfiligoi
#
import time
import string
import os.path
def get_timestr(when=time.time()):
start_time_tuple=time.localtime(when)
timestr=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.printable[start_time_tuple[4]]+ #minute
             string.printable[start_time_tuple[5]]) #second
return timestr
TIMESTR=get_timestr()
# insert timestr just before the last .
def insert_timestr(str):
arr=string.split(str,'.')
if len(arr)==1:
arr.append(TIMESTR)
else:
arr.insert(-1,TIMESTR)
return string.join(arr,'.')
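# e.g. insert_timestr("description.cfg") -> "description.<TIMESTR>.cfg"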
# these two are in the work dir, so they can be changed
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.cfg"
VARS_FILE="condor_vars.lst"
CONSTS_FILE="constants.cfg"
UNTAR_CFG_FILE="untar.cfg"
FILE_LISTFILE="file_list.lst"
SIGNATURE_FILE="signature.sha1"
BLACKLIST_FILE="nodes.blacklist"
GRIDMAP_FILE='grid-mapfile'
|
# -*- coding: utf-8 -*-
# [email protected]
#
import PyFly
import sys
class GBDT_LR:
def __init__(self, tree_model_file, lr_model_file, tree_feature_offset):
self.__gbdt = PyFly.load_gbdt(tree_model_file)
self.__lr = PyFly.load_lr(lr_model_file)
self.__feature_offset = tree_feature_offset
def predict(self, tree_input):
# get tree_feature and input to LR, output LR score.
# input format:
# [(idx, value), ..]
tf = map(lambda x:(x + self.__feature_offset, 1), PyFly.tree_features(self.__gbdt, tree_input))
total_feature = tree_input + tf
return PyFly.predict(self.__lr, total_feature)
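# Illustrative flow (PyFly behaviour assumed): for tree_input = [(0, 0.5), (3, 1.2)],
# if the GBDT maps the sample to leaf indices [2, 7], then tf = [(2 + offset, 1), (7 + offset, 1)]
# and the LR scores the concatenated sparse vector tree_input + tf.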
if __name__ == '__main__':
if len(sys.argv)!=3:
print >> sys.stderr, 'Usage: tree_feature_lr.py <tree_model_file> <lr_model_file>\n\n'
sys.exit(-1)
model = GBDT_LR(sys.argv[1], sys.argv[2], 21)
# test input.
while 1:
line = sys.stdin.readline()
if line == '':
break
arr = line.strip('\n').split(' ')
label = arr[0]
tree_input = map(lambda x:(int(x[0]), float(x[1])), map(lambda x:x.split(':'), arr[1:]))
output = model.predict(tree_input)
print label, output
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a thirdparty project
from __future__ import print_function
import logging
import pprint
import sys
from django.core.management.base import BaseCommand
from desktop import conf
from desktop.lib.daemon_utils import drop_privileges_if_necessary
CPSERVER_HELP = r"""
Run Hue using the CherryPy WSGI server.
"""
CPSERVER_OPTIONS = {
'host': conf.HTTP_HOST.get(),
'port': conf.HTTP_PORT.get(),
'server_name': 'localhost',
'threads': conf.CHERRYPY_SERVER_THREADS.get(),
'daemonize': False, # supervisor does this for us
'workdir': None,
'pidfile': None,
'server_user': conf.SERVER_USER.get(),
'server_group': conf.SERVER_GROUP.get(),
'ssl_certificate': conf.SSL_CERTIFICATE.get(),
'ssl_private_key': conf.SSL_PRIVATE_KEY.get(),
'ssl_certificate_chain': conf.SSL_CERTIFICATE_CHAIN.get(),
'ssl_cipher_list': conf.SSL_CIPHER_LIST.get(),
'ssl_no_renegotiation': conf.SSL_NO_RENEGOTIATION.get()
}
class Command(BaseCommand):
help = "CherryPy Server for Desktop."
args = ""
def handle(self, *args, **options):
from django.conf import settings
from django.utils import translation
if not conf.ENABLE_SERVER.get():
logging.info("Hue is configured to not start its own web server.")
sys.exit(0)
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
runcpserver(args)
def usage(self, subcommand):
return CPSERVER_HELP
def start_server(options):
"""
Start CherryPy server
"""
from desktop.lib.wsgiserver import CherryPyWSGIServer as Server
from desktop.lib.wsgiserver import SSLConnection
from django.core.handlers.wsgi import WSGIHandler
# Translogger wraps a WSGI app with Apache-style combined logging.
server = Server(
(options['host'], int(options['port'])),
WSGIHandler(),
int(options['threads']),
options['server_name']
)
if options['ssl_certificate'] and options['ssl_private_key']:
server.ssl_certificate = options['ssl_certificate']
server.ssl_private_key = options['ssl_private_key']
if options['ssl_certificate_chain']:
server.ssl_certificate_chain = options['ssl_certificate_chain']
server.ssl_cipher_list = options['ssl_cipher_list']
server.ssl_no_renegotiation = options['ssl_no_renegotiation']
ssl_password = conf.get_ssl_password()
if ssl_password:
server.ssl_password_cb = lambda *unused: ssl_password
try:
server.bind_server()
drop_privileges_if_necessary(options)
if isinstance(server.socket, SSLConnection):
ciphers = server.socket.get_cipher_list()
logging.info("List of enabled ciphers: {}".format(':'.join(ciphers)))
server.listen_and_loop()
except KeyboardInterrupt:
server.stop()
def runcpserver(argset=[], **kwargs):
# Get the options
options = CPSERVER_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
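    # e.g. argset=["port=8001", "daemonize"] yields options["port"] == "8001" and options["daemonize"] is True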
if "help" in options:
print(CPSERVER_HELP)
return
# Start the webserver
logging.info("Starting server with options:\n{}".format(pprint.pformat(options)))
start_server(options)
if __name__ == '__main__':
runcpserver(sys.argv[1:])
|
#!/usr/bin/env python3
# ~/dev/py/rnglib/testRandomFunc.py
""" Exercise the random number generator functions. """
import math
import time
import unittest
from rnglib import SimpleRNG
class TestRandomFunc(unittest.TestCase):
"""
Exercise the random number generator functions.
This is not a test in the usual sense. It exercises random.Random
functions through a SimpleRNG instance and makes the results available
for human inspection.
This code is hacked from python2.7/random.py and so made available
under the same license as Python itself.
"""
def do_test(self, count, func, args):
""" Carry out tests with specified parameters. """
# print("%u invocations of %s" % (n, func.__name__))
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t00 = time.time()
for _ in range(count):
xxx = func(*args)
total += xxx
sqsum = sqsum + xxx * xxx
smallest = min(xxx, smallest)
largest = max(xxx, largest)
t01 = time.time()
print(" %6.4f sec, " % round(t01 - t00, 4), end=' ')
avg = total / count
stddev = math.sqrt(sqsum / count - avg * avg)
print('avg %g, stddev %g, min %g, max %g' %
(avg, stddev, smallest, largest))
def rand_test(self, count=1000):
""" Repeath a suite of tests N times. """
rng = SimpleRNG(time.time())
self.do_test(count, rng.random, ())
self.do_test(count, rng.normalvariate, (0.0, 1.0))
self.do_test(count, rng.lognormvariate, (0.0, 1.0))
self.do_test(count, rng.vonmisesvariate, (0.0, 1.0))
self.do_test(count, rng.gammavariate, (0.01, 1.0))
self.do_test(count, rng.gammavariate, (0.1, 1.0))
self.do_test(count, rng.gammavariate, (0.1, 2.0))
self.do_test(count, rng.gammavariate, (0.5, 1.0))
self.do_test(count, rng.gammavariate, (0.9, 1.0))
self.do_test(count, rng.gammavariate, (1.0, 1.0))
self.do_test(count, rng.gammavariate, (2.0, 1.0))
self.do_test(count, rng.gammavariate, (20.0, 1.0))
self.do_test(count, rng.gammavariate, (200.0, 1.0))
self.do_test(count, rng.gauss, (0.0, 1.0))
self.do_test(count, rng.betavariate, (3.0, 3.0))
self.do_test(count, rng.triangular, (0.0, 1.0, 1.0 / 3.0))
def test_rand_test(self):
""" Repeat the test suite a thousand times. """
self.rand_test(count=1000)
if __name__ == '__main__':
unittest.main()
|
"""
Serializers for the task application API
"""
# Django
from django.contrib.auth.models import User
# Third Party
from rest_framework import serializers
# MuckRock
from muckrock.agency.models import Agency
from muckrock.foia.models import FOIACommunication, FOIARequest
from muckrock.jurisdiction.models import Jurisdiction
from muckrock.task.models import (
FlaggedTask,
NewAgencyTask,
OrphanTask,
ResponseTask,
SnailMailTask,
Task,
)
class TaskSerializer(serializers.ModelSerializer):
"""Serializer for Task model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
orphantask = serializers.PrimaryKeyRelatedField(
queryset=OrphanTask.objects.all(), style={"base_template": "input.html"}
)
snailmailtask = serializers.PrimaryKeyRelatedField(
queryset=SnailMailTask.objects.all(), style={"base_template": "input.html"}
)
flaggedtask = serializers.PrimaryKeyRelatedField(
queryset=FlaggedTask.objects.all(), style={"base_template": "input.html"}
)
newagencytask = serializers.PrimaryKeyRelatedField(
queryset=NewAgencyTask.objects.all(), style={"base_template": "input.html"}
)
responsetask = serializers.PrimaryKeyRelatedField(
queryset=ResponseTask.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = Task
fields = (
"id",
"date_created",
"date_done",
"resolved",
"assigned",
"orphantask",
"snailmailtask",
"rejectedemailtask",
"flaggedtask",
"newagencytask",
"responsetask",
)
class OrphanTaskSerializer(serializers.ModelSerializer):
"""Serializer for OrphanTask model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
resolved_by = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
communication = serializers.PrimaryKeyRelatedField(
queryset=FOIACommunication.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = OrphanTask
fields = "__all__"
class SnailMailTaskSerializer(serializers.ModelSerializer):
"""Serializer for SnailMailTask model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
resolved_by = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
communication = serializers.PrimaryKeyRelatedField(
queryset=FOIACommunication.objects.all(), style={"base_template": "input.html"}
)
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = SnailMailTask
fields = "__all__"
class FlaggedTaskSerializer(serializers.ModelSerializer):
"""Serializer for FlaggedTask model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
resolved_by = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
foia = serializers.PrimaryKeyRelatedField(
queryset=FOIARequest.objects.all(), style={"base_template": "input.html"}
)
agency = serializers.PrimaryKeyRelatedField(
queryset=Agency.objects.all(), style={"base_template": "input.html"}
)
jurisdiction = serializers.PrimaryKeyRelatedField(
queryset=Jurisdiction.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = FlaggedTask
fields = "__all__"
class NewAgencyTaskSerializer(serializers.ModelSerializer):
"""Serializer for NewAgencyTask model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
resolved_by = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
agency = serializers.PrimaryKeyRelatedField(
queryset=Agency.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = NewAgencyTask
fields = "__all__"
class ResponseTaskSerializer(serializers.ModelSerializer):
"""Serializer for ResponseTask model"""
assigned = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
resolved_by = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(), style={"base_template": "input.html"}
)
communication = serializers.PrimaryKeyRelatedField(
queryset=FOIACommunication.objects.all(), style={"base_template": "input.html"}
)
class Meta:
model = ResponseTask
fields = "__all__"
|
from infodenguepredict.data.infodengue import build_multicity_dataset, get_cluster_data
from infodenguepredict.models.deeplearning.lstm import single_prediction
import numpy as np
import pandas as pd
import re
import pickle
def rank_cities(state):
mult = build_multicity_dataset(state)
    cols = list(filter(re.compile(r'casos_\d+').search, mult.columns))
mult = mult[cols]
print(mult.head())
codes = pd.read_excel('../../data/codigos_{}.xlsx'.format(state),
names=['city', 'code'], header=None).set_index('code').T
ints = pd.DataFrame()
for col in mult.columns:
# ints.loc[codes[int(re.sub('casos_', '', col))]] = [np.trapz(mult[col])]
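        # Rank by the area under each city's case-count curve (trapezoidal rule).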
ints[col] = [np.trapz(mult[col])]
return ints
if __name__ == "__main__":
TIME_WINDOW = 4
HIDDEN = 4
LOOK_BACK = 4
BATCH_SIZE = 1
prediction_window = 3 # weeks
# city = 3303500
state = 'RJ'
epochs = 10
rank = rank_cities(state)
mapes = []
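    # For each city (ranked by total cases), run the LSTM prediction and record
    # the returned error metric, stored below as the 'mape' column.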
for col in rank:
city = re.sub('casos_', '', col)
metric = single_prediction(int(city), state, predict_n=prediction_window, time_window=TIME_WINDOW, hidden=HIDDEN,
epochs=epochs)
mapes.append(metric)
rank = rank.T
rank['mape'] = mapes
rank.to_pickle('rank.pkl')
|
from random import randint, uniform
from elixir_models import Object
# Some helpers for random.
def randloc():
return randint(-100, 100)
def randrot():
return randint(-360, 360)
def randscale():
return uniform(.75, 1.25)
class BaseZone(object):
def __init__(self, logger=None):
'''Initialize the zone.
        Insert whatever objects you need into the database programmatically.
This includes loading things from a disk file if you so choose.
It will not run more than once on the zone's database.
If you want new content on an existing database, either make
a script to apply changes, or just delete the database for
that zone and recreate it when the zone is started up again.
'''
self.setup_logging(logger=logger)
self.load()
@staticmethod
def randobj(name="Object #%s", resource='object', count=1, states=None, scripts=None):
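        '''Create `count` Object instances with random position, rotation and
        scale, persist them, and return the list of saved objects.'''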
objs = []
for i in xrange(count):
obj = Object()
obj.name = name % i
obj.resource = resource
obj.loc_x, obj.loc_y, obj.loc_z = randloc(), randloc(), randloc()
obj.rot_x, obj.rot_y, obj.rot_z = randrot(), randrot(), randrot()
obj.scale_x, obj.scale_y, obj.scale_z = randscale(), randscale(), randscale()
obj.vel_x, obj.vel_y, obj.vel_z = 0, 0, 0
if states:
obj.states.extend(states)
if scripts:
obj.scripts.extend(scripts)
obj.save()
objs.append(obj)
return objs
def setup_logging(self, logger=None):
if logger:
self.logger = logger
else:
import logging
self.logger = logging.getLogger('zoneserver.'+__file__)
def load(self):
if not self.is_loaded():
self.insert_objects()
# Loading complete.
self.set_loaded()
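    # A hidden sentinel object named "Loading Complete." marks a zone database
    # as initialized, so insert_objects() runs at most once per database.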
def is_loaded(self):
if Object.get_objects(name='Loading Complete.'):
return True
else:
return False
def set_loaded(self):
from sys import maxint
obj = Object()
obj.name = "Loading Complete."
far = maxint*-1
obj.loc_x, obj.loc_y, obj.loc_z = far, far, far
obj.states.extend(['hidden'])
obj.save()
print obj.name
def insert_objects(self):
'''Insert any objects you want to be present in the zone into the
database in this call.
This gets called exactly once per database. If you change something here
and want it to appear in the zone's database, you will need to clear the
database first.
Deleting the "Loading Complete" object will only cause duplicates.
Do not do this.
'''
pass
|
import sublime, sublime_plugin
import re
import time
class ReplaceAllAppearances(sublime_plugin.WindowCommand):
original_text = ""
def run(self):
text = ""
active_view = self.window.active_view()
for region in active_view.sel():
if region.empty():
text = active_view.substr(active_view.word(region.begin()))
if not re.match(r'^[A-Za-z0-9_]+$', text):
return
if len(text) <= 2:
return
self.original_text = text
self.window.show_input_panel(
'Replace {0} with'.format(text),
'',
lambda query: (
self.replace(query)
),
None,
None
)
def replace(self, query):
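        # A replacement starting with "#" applies to every open view; otherwise
        # only the active view is touched. The leading "#" itself is stripped.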
if query.startswith("#"):
query = query[1:]
views = self.window.views()
else:
views = [self.window.active_view()]
total = 0
for view in views:
original_string = view.substr(sublime.Region(0, view.size()))
pp = '\\b' + self.original_text + '\\b'
p = re.compile(pp)
(new_string, count) = p.subn(query, original_string)
total = total + count
if new_string != original_string:
view.run_command('replace_content', {"new_content": new_string})
        sublime.status_message("Replaced {0} occurrences across {1} files".format(total, len(views)))
class ReplaceContent(sublime_plugin.TextCommand):
def run(self, edit, new_content):
view = self.view
view.replace(edit, sublime.Region(0, view.size()), new_content)
class AlignColon(sublime_plugin.TextCommand):
def previousLine(self, line):
return self.view.line(sublime.Region(line.begin() - 1))
def firstColon(self, str):
return str.find(':')
def totalBeginingSpace(self, str):
r = 0
while r < len(str) and str[r] == ' ':
r = r + 1
return r
def run(self, edit):
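        # Align the first ':' on the current line with the ':' on the previous
        # line by inserting or trimming leading spaces on the current line.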
for region in self.view.sel():
if region.empty():
cur = self.view.line(region)
prev = self.previousLine(cur)
prev_line = self.view.substr(prev)
cur_line = self.view.substr(cur)
p = self.firstColon(prev_line)
c = self.firstColon(cur_line)
print(prev_line)
print(cur_line)
if p >= 0 and c >= 0:
if p > c:
self.view.insert(edit, cur.begin(), ' ' * (p - c))
elif c > p:
r = self.totalBeginingSpace(cur_line)
r = min(r, c - p)
self.view.erase(edit, sublime.Region(cur.begin(), cur.begin() + r))
class CopyCurrentWord(sublime_plugin.TextCommand):
last_copy_time = None
last_copy_id = None
allowedSet = None
def run(self, edit):
copy_delay = 1.2
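        # Invoking the command again within copy_delay seconds at the same spot
        # copies an extended identifier (letters, digits, '_' and '.') instead
        # of just the word under the cursor.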
for region in self.view.sel():
if region.empty():
current_time = time.time()
current_id = self.get_copy_id(region)
if self.last_copy_time and self.last_copy_id == current_id and (current_time < self.last_copy_time + copy_delay):
sublime.set_clipboard(self.view.substr(self.get_extended_region(region.begin())))
else:
sublime.set_clipboard(self.view.substr(self.view.word(region.begin())))
self.last_copy_time = current_time
self.last_copy_id = current_id
break
def get_copy_id(self, region):
return "%d:%d:%d" % (self.view.id(), region.begin(), region.end())
def get_extended_region(self, pos):
if not self.allowedSet:
self.allowedSet = set()
for c in range(ord('a'), ord('z') + 1):
self.allowedSet.add(chr(c))
for c in range(ord('A'), ord('Z') + 1):
self.allowedSet.add(chr(c))
for c in range(ord('0'), ord('9') + 1):
self.allowedSet.add(chr(c))
self.allowedSet.add('_')
self.allowedSet.add('.')
r = pos
while r < self.view.size() and self.view.substr(r) in self.allowedSet:
r = r + 1
l = pos - 1
while l >= 0 and self.view.substr(l) in self.allowedSet:
l = l - 1
return sublime.Region(l + 1, r)
# We need to run a text command in order to force the view to update its cursor rendering
class ShowViewAtPosition(sublime_plugin.TextCommand):
def run(self, edit, position, length = 0):
self.view.show_at_center(position)
self.view.sel().clear()
end_position = position
if length > 0:
print("=== end_position = ", end_position)
end_position = position + length
self.view.sel().add(sublime.Region(position, end_position))
|
# -*- coding: utf-8 -*-
import json
from io import StringIO
from operator import itemgetter
from django.shortcuts import get_object_or_404
from guardian.shortcuts import assign_perm
from catmaid.control.authentication import (can_edit_all_or_fail,
can_edit_or_fail, PermissionError)
from catmaid.control.common import get_relation_to_id_map, get_class_to_id_map
from catmaid.models import ClassInstance, ClassInstanceClassInstance, Log
from catmaid.models import Treenode, TreenodeClassInstance, TreenodeConnector
from catmaid.models import User, Group
from catmaid.state import make_nocheck_state
from .common import CatmaidApiTestCase
class TreenodesApiTests(CatmaidApiTestCase):
def test_list_treenode_table_empty(self):
self.fake_authentication()
response = self.client.get('/%d/skeletons/%d/node-overview' % \
(self.test_project_id, 0))
self.assertStatus(response)
expected_result = [[], [], []]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result, parsed_response)
def test_fail_update_confidence(self):
        treenode_id = Treenode.objects.order_by("-id")[0].id + 1  # Nonexistent
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4'})
self.assertEqual(response.status_code, 400)
expected_result = 'No skeleton and neuron for treenode %s' % treenode_id
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result, parsed_response['error'])
def test_update_confidence_of_treenode(self):
treenode_id = 11
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4', 'state': make_nocheck_state()})
self.assertStatus(response)
treenode = Treenode.objects.filter(id=treenode_id).get()
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
'message': 'success',
'updated_partners': {
'7': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 5
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('7', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('7').get('old_confidence'),
parsed_response.get('updated_partners').get('7').get('old_confidence'))
self.assertEqual(4, treenode.confidence)
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '5', 'state': make_nocheck_state()})
self.assertStatus(response)
treenode = Treenode.objects.filter(id=treenode_id).get()
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
'message': 'success',
'updated_partners': {
'7': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 4
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('7', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('7').get('old_confidence'),
parsed_response.get('updated_partners').get('7').get('old_confidence'))
self.assertEqual(5, treenode.confidence)
def test_update_confidence_of_treenode_connector(self):
treenode_id = 285
treenode_connector_id = 360
self.fake_authentication()
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '4', 'to_connector': 'true',
'state': make_nocheck_state()})
self.assertStatus(response)
connector = TreenodeConnector.objects.filter(id=treenode_connector_id).get()
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
'message': 'success',
'updated_partners': {
'356': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 5
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('356', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('356').get('old_confidence'),
parsed_response.get('updated_partners').get('356').get('old_confidence'))
self.assertEqual(4, connector.confidence)
response = self.client.post(
'/%d/treenodes/%d/confidence' % (self.test_project_id, treenode_id),
{'new_confidence': '5', 'to_connector': 'true', 'state': make_nocheck_state()})
self.assertStatus(response)
connector = TreenodeConnector.objects.filter(id=treenode_connector_id).get()
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
'message': 'success',
'updated_partners': {
'356': {
'edition_time': '2016-04-13T05:57:44.444Z',
'old_confidence': 4
}
}
}
self.assertIn('message', parsed_response)
self.assertEqual(expected_result.get('message'), parsed_response.get('message'))
self.assertIn('updated_partners', parsed_response)
self.assertIn('356', parsed_response.get('updated_partners'))
self.assertEqual(expected_result.get('updated_partners').get('356').get('old_confidence'),
parsed_response.get('updated_partners').get('356').get('old_confidence'))
self.assertEqual(5, connector.confidence)
def test_create_treenode(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'radius': 2})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
self.assertEqual(neuron_count + 1, count_neurons())
neuron_skeleton_relation = ClassInstanceClassInstance.objects.get(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'])
neuron_id = neuron_skeleton_relation.class_instance_b.id
neuron_log = Log.objects.get(
project=self.test_project_id,
operation_type='create_neuron',
freetext=f'Create neuron {neuron_id} and skeleton {parsed_response["skeleton_id"]}')
neuron_log_location = neuron_log.location
self.assertEqual(5, neuron_log_location.x)
self.assertEqual(10, neuron_log_location.y)
self.assertEqual(15, neuron_log_location.z)
def test_create_treenode2(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'radius': 2})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
self.assertEqual(neuron_count + 1, count_neurons())
neuron_skeleton_relation = ClassInstanceClassInstance.objects.get(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'])
neuron_id = neuron_skeleton_relation.class_instance_b.id
neuron_log = Log.objects.get(
project=self.test_project_id,
operation_type='create_neuron',
freetext=f'Create neuron {neuron_id} and skeleton {parsed_response["skeleton_id"]}')
root = ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['root'])[0]
neuron_log_location = neuron_log.location
self.assertEqual(5, neuron_log_location.x)
self.assertEqual(10, neuron_log_location.y)
self.assertEqual(15, neuron_log_location.z)
def test_create_treenode_with_existing_neuron(self):
self.fake_authentication()
relation_map = get_relation_to_id_map(self.test_project_id)
class_map = get_class_to_id_map(self.test_project_id)
neuron_id = 2389
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_treenodes = lambda: Treenode.objects.all().count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': -1,
'useneuron': neuron_id,
'radius': 2})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count + 1, count_skeletons())
neuron_skeleton_relation = ClassInstanceClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['model_of'],
class_instance_a=parsed_response['skeleton_id'],
class_instance_b=neuron_id)
self.assertEqual(1, neuron_skeleton_relation.count())
def test_create_treenode_with_nonexisting_parent_failure(self):
self.fake_authentication()
parent_id = 555555
treenode_count = Treenode.objects.all().count()
relation_count = TreenodeClassInstance.objects.all().count()
response = self.client.post('/%d/treenode/create' % self.test_project_id, {
'x': 5,
'y': 10,
'z': 15,
'confidence': 5,
'parent_id': parent_id,
'radius': 2,
'state': make_nocheck_state()})
self.assertEqual(response.status_code, 400)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {'error': 'Parent treenode %d does not exist' % parent_id}
self.assertIn(expected_result['error'], parsed_response['error'])
self.assertEqual(treenode_count, Treenode.objects.all().count())
self.assertEqual(relation_count, TreenodeClassInstance.objects.all().count())
def test_update_treenode_parent(self):
self.fake_authentication()
skeleton_id = 373
treenode_id = 405
new_parent_id = 403
response = self.client.post(
'/%d/treenodes/%d/parent' % (self.test_project_id, treenode_id),
{'parent_id': new_parent_id, 'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
response = self.client.post(
'/%d/%d/1/1/compact-skeleton' % (self.test_project_id, skeleton_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
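        # compact-skeleton responses are structured as [treenodes, connectors, tag map].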
expected_response = [
[[377, None, 3, 7620.0, 2890.0, 0.0, -1.0, 5],
[403, 377, 3, 7840.0, 2380.0, 0.0, -1.0, 5],
[405, 403, 3, 7390.0, 3510.0, 0.0, -1.0, 5],
[407, 405, 3, 7080.0, 3960.0, 0.0, -1.0, 5],
[409, 407, 3, 6630.0, 4330.0, 0.0, -1.0, 5]],
[[377, 356, 1, 6730.0, 2700.0, 0.0],
[409, 421, 1, 6260.0, 3990.0, 0.0]],
{"uncertain end": [403]}]
self.assertCountEqual(parsed_response[0], expected_response[0])
self.assertCountEqual(parsed_response[1], expected_response[1])
self.assertEqual(parsed_response[2], expected_response[2])
def test_delete_root_treenode_with_children_failure(self):
self.fake_authentication()
treenode_id = 367
tn_count = Treenode.objects.all().count()
child_count = Treenode.objects.filter(parent=treenode_id).count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertEqual(response.status_code, 400)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = "Could not delete root node: You can't delete the " \
"root node when it has children."
self.assertEqual(expected_result, parsed_response['error'])
self.assertEqual(1, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count, Treenode.objects.all().count())
self.assertEqual(child_count, Treenode.objects.filter(parent=treenode_id).count())
def test_insert_treenoded_on_edge(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y)
new_node_z = 0.5 * (child.location_z + parent.location_z)
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
new_node_id = parsed_response['treenode_id']
new_node = Treenode.objects.get(pk=new_node_id)
child = Treenode.objects.get(pk=child_id)
self.assertEqual(new_node.parent_id, parent_id)
self.assertEqual(child.parent_id, new_node_id)
self.assertEqual(new_node.user_id, self.test_user_id)
self.assertEqual(new_node.skeleton_id, child.skeleton_id)
self.assertEqual(new_node.location_x, new_node_x)
self.assertEqual(new_node.location_y, new_node_y)
self.assertEqual(new_node.location_z, new_node_z)
def test_insert_treenoded_not_on_edge_with_permission(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y) + 10
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y. This is allowed if the
        # user has permission to edit the neuron.
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('treenode_id' in parsed_response)
self.assertTrue('skeleton_id' in parsed_response)
self.assertEqual(treenode_count + 1, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
new_node_id = parsed_response['treenode_id']
new_node = Treenode.objects.get(pk=new_node_id)
child = Treenode.objects.get(pk=child_id)
self.assertEqual(new_node.parent_id, parent_id)
self.assertEqual(child.parent_id, new_node_id)
self.assertEqual(new_node.user_id, self.test_user_id)
self.assertEqual(new_node.skeleton_id, child.skeleton_id)
self.assertEqual(new_node.location_x, new_node_x)
self.assertEqual(new_node.location_y, new_node_y)
self.assertEqual(new_node.location_z, new_node_z)
def test_insert_treenoded_not_on_edge_without_permission(self):
self.fake_authentication(username='test0')
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2374
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
        # Give the child and parent a different creator (admin) than the test user
owner = User.objects.get(username='admin')
for n in (child, parent):
n.creator = owner
n.save()
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y) + 10
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y. This should fail since
        # the new node would introduce a structural change to the skeleton.
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id})
self.assertEqual(response.status_code, 403)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('error' in parsed_response)
self.assertEqual(treenode_count, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
def test_insert_treenoded_no_child_parent(self):
self.fake_authentication()
class_map = get_class_to_id_map(self.test_project_id)
count_treenodes = lambda: Treenode.objects.all().count()
count_skeletons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['skeleton']).count()
count_neurons = lambda: ClassInstance.objects.filter(
project=self.test_project_id,
class_column=class_map['neuron']).count()
treenode_count = count_treenodes()
skeleton_count = count_skeletons()
neuron_count = count_neurons()
# Get two nodes and calculate point between them
child_id = 2376
parent_id = 2372
child = Treenode.objects.get(pk=child_id)
parent = Treenode.objects.get(pk=parent_id)
new_node_x = 0.5 * (child.location_x + parent.location_x)
new_node_y = 0.5 * (child.location_y + parent.location_y)
new_node_z = 0.5 * (child.location_z + parent.location_z)
        # Try to insert with a slight distortion in Y
response = self.client.post('/%d/treenode/insert' % self.test_project_id, {
'x': new_node_x,
'y': new_node_y,
'z': new_node_z,
'child_id': child_id,
'parent_id': parent_id})
self.assertEqual(response.status_code, 400)
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertTrue('error' in parsed_response)
self.assertEqual(treenode_count, count_treenodes())
self.assertEqual(skeleton_count, count_skeletons())
self.assertEqual(neuron_count, count_neurons())
def test_delete_non_root_non_parent_treenode(self):
self.fake_authentication()
treenode_id = 349
tn_count = Treenode.objects.all().count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = 'Removed treenode successfully.'
self.assertEqual(expected_result, parsed_response['success'])
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
def test_delete_root_treenode(self):
self.fake_authentication()
treenode_id = 2437
treenode = Treenode.objects.filter(id=treenode_id)[0]
children = Treenode.objects.filter(parent=treenode_id)
self.assertEqual(0, children.count())
self.assertEqual(None, treenode.parent)
tn_count = Treenode.objects.all().count()
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {
'success': 'Removed treenode successfully.',
'parent_id': None,
'deleted_neuron': True,
'skeleton_id': 2433,
'children': [],
'confidence': 5,
'radius': -1.0,
'links': [],
'x': 5290.0,
'y': 3930.0,
'z': 279.0
}
self.assertEqual(expected_result, parsed_response)
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
def test_delete_non_root_treenode(self):
self.fake_authentication()
treenode_id = 265
relation_map = get_relation_to_id_map(self.test_project_id)
get_skeleton = lambda: TreenodeClassInstance.objects.filter(
project=self.test_project_id,
relation=relation_map['element_of'],
treenode=treenode_id)
self.assertEqual(1, get_skeleton().count())
children = Treenode.objects.filter(parent=treenode_id)
self.assertTrue(children.count() > 0)
tn_count = Treenode.objects.all().count()
parent = get_object_or_404(Treenode, id=treenode_id).parent
response = self.client.post(
'/%d/treenode/delete' % self.test_project_id,
{'treenode_id': treenode_id, 'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = 'Removed treenode successfully.'
self.assertEqual(expected_result, parsed_response['success'])
self.assertEqual(0, Treenode.objects.filter(id=treenode_id).count())
self.assertEqual(0, get_skeleton().count())
self.assertEqual(tn_count - 1, Treenode.objects.all().count())
for child in children:
child_after_change = get_object_or_404(Treenode, id=child.id)
self.assertEqual(parent, child_after_change.parent)
def test_treenode_info_nonexisting_treenode_failure(self):
self.fake_authentication()
treenode_id = 55555
response = self.client.get(
'/%d/treenodes/%s/info' % (self.test_project_id, treenode_id))
self.assertEqual(response.status_code, 400)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = 'No skeleton and neuron for treenode %s' % treenode_id
self.assertIn('error', parsed_response)
self.assertEqual(expected_result, parsed_response['error'])
def test_treenode_info(self):
self.fake_authentication()
treenode_id = 239
response = self.client.get(
'/%d/treenodes/%s/info' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = {'skeleton_id': 235, 'neuron_id': 233, 'skeleton_name': 'skeleton 235', 'neuron_name': 'branched neuron'}
self.assertEqual(expected_result, parsed_response)
def assertTreenodeHasRadius(self, treenode_id, radius):
"""Helper function for radius update tests."""
treenode = Treenode.objects.get(id=treenode_id)
self.assertEqual(radius, treenode.radius,
'Treenode %d has radius %s not %s' % (treenode_id, treenode.radius, radius))
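    # The 'option' value in the radius update requests below selects the update
    # scope, as the test names suggest: 0 = single node, 1 = to the next branch
    # or end node, 2 = back to the previous branch node, 3 = back to the previous
    # node with a defined radius, 4 = back to the root, 5 = all nodes in the skeleton.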
def test_update_treenode_radius_single_node(self):
self.fake_authentication()
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 0, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(259, old_r), (257, new_r), (255, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_next_branch(self):
self.fake_authentication()
# Test to end node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 1, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(261, new_r), (259, new_r), (257, new_r),
(255, old_r), (253, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
# Test to branch node
treenode_id = 263
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 1, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(253, old_r), (263, new_r), (265, new_r),
(269, old_r), (267, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_prev_branch(self):
self.fake_authentication()
# Test to branch node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 2, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(261, old_r), (259, old_r), (257, new_r),
(255, new_r), (253, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
# Test to root node
treenode_id = 253
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 2, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(255, new_r), (263, old_r), (253, new_r),
(251, new_r), (249, new_r), (247, new_r),
(247, new_r), (245, new_r), (243, new_r),
(241, new_r), (239, new_r), (237, old_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_prev_defined_node(self):
self.fake_authentication()
# Set radius at ancestor node
ancestor = Treenode.objects.get(id=251)
ancestor.radius = 7
ancestor.save()
# Test to previous defined node
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 3, 'state': make_nocheck_state()})
self.assertStatus(response)
        expected = [(261, old_r), (259, old_r), (257, new_r),
                    (255, new_r), (253, new_r), (251, 7)]
        for x in expected:
            self.assertTreenodeHasRadius(*x)
# Test on node with defined radius (and propagation to root)
treenode_id = ancestor.id
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 3, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(253, new_r), (251, new_r), (249, new_r),
(247, new_r), (247, new_r), (245, new_r),
(243, new_r), (241, new_r), (239, new_r),
(237, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_to_root(self):
self.fake_authentication()
treenode_id = 257
new_r = 5
old_r = -1
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 4, 'state': make_nocheck_state()})
self.assertStatus(response)
expected = [(261, old_r), (259, old_r), (257, new_r),
(255, new_r), (253, new_r), (263, old_r),
(251, new_r), (249, new_r), (247, new_r),
(247, new_r), (245, new_r), (243, new_r),
(241, new_r), (239, new_r), (237, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_update_treenode_radius_all_nodes(self):
self.fake_authentication()
treenode_id = 2417
new_r = 5.0
old_r = -1.0
response = self.client.post(
'/%d/treenode/%d/radius' % (self.test_project_id, treenode_id),
{'radius': new_r, 'option': 5, 'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_response = {
'success': True,
'new_radius': new_r,
'updated_nodes': {
'2415': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2417': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2419': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411},
'2423': {'edition_time': u'2016-04-08T15:33:16.133Z',
'new': 5.0,
'old': -1.0,
'skeleton_id': 2411}}
}
        # The response has updated timestamps (since we updated nodes), so we
        # have to compare fields manually and ignore them
for k,v in expected_response.items():
self.assertIn(k, parsed_response)
if 'updated_nodes' == k:
continue
self.assertEqual(v, parsed_response.get(k))
for k,v in expected_response['updated_nodes'].items():
self.assertIn(k, parsed_response['updated_nodes'])
result_node = parsed_response['updated_nodes'][k]
for p,pv in v.items():
self.assertIn(p, result_node)
result_value = result_node.get(p)
if 'edition_time' == p:
                    # The edition time changes through the update and the test
                    # can't know the new value, so only check that it changed
self.assertNotEqual(pv, result_value)
else:
self.assertEqual(pv, result_value)
# Don't expect any more items than the above:
self.assertEqual(len(expected_response['updated_nodes']),
len(parsed_response['updated_nodes']))
expected = [(2419, new_r), (2417, new_r), (2415, new_r), (2423, new_r)]
for x in expected:
self.assertTreenodeHasRadius(*x)
def test_node_find_previous_branch(self):
self.fake_authentication()
treenode_id = 257
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Response should contain one branch.
expected_result = [253, 3685.0, 2160.0, 0.0]
self.assertEqual(expected_result, parsed_response)
treenode_id = 253
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Response should contain one branch.
expected_result = [237, 1065.0, 3035.0, 0.0]
self.assertEqual(expected_result, parsed_response)
treenode_id = 237
response = self.client.post(
'/%d/treenodes/%d/previous-branch-or-root' % (self.test_project_id, treenode_id),
{'alt': 0})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Response should contain one branch.
expected_result = [237, 1065.0, 3035.0, 0.0]
self.assertEqual(expected_result, parsed_response)
def test_node_find_next_branch(self):
self.fake_authentication()
treenode_id = 391
response = self.client.post(
'/%d/treenodes/%d/next-branch-or-end' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Response should contain one branch.
expected_result = [[[393, 6910.0, 990.0, 0.0],
[393, 6910.0, 990.0, 0.0],
[399, 5670.0, 640.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
treenode_id = 253
response = self.client.post(
'/%d/treenodes/%d/next-branch-or-end' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Response should contain two branches, and the larger branch headed by
# node 263 should be first.
expected_result = [[[263, 3915.0, 2105.0, 0.0],
[263, 3915.0, 2105.0, 0.0],
[265, 4570.0, 2125.0, 0.0]],
[[255, 3850.0, 1790.0, 0.0],
[255, 3850.0, 1790.0, 0.0],
[261, 2820.0, 1345.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
def test_treenode_find_children(self):
self.fake_authentication()
treenode_id = 387
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = []
self.assertEqual(expected_result, parsed_response)
treenode_id = 385
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = [[[387, 9030.0, 1480.0, 0.0]]]
self.assertEqual(expected_result, parsed_response)
treenode_id = 367
response = self.client.post(
'/%d/treenodes/%d/children' % (self.test_project_id, treenode_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = [[383, 7850.0, 1970.0, 0.0], [391, 6740.0, 1530.0, 0.0]]
parsed_response = [p[0] for p in parsed_response]
for (expected, parsed) in zip(sorted(expected_result), sorted(parsed_response)):
self.assertEqual(expected, parsed)
def test_suppressed_virtual_nodes(self):
self.fake_authentication()
response = self.client.post(
'/%d/treenode/create' % (self.test_project_id, ),
{'x': 1,
'y': -1,
'z': 0})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
parent_id = parsed_response['treenode_id']
skeleton_id = parsed_response['skeleton_id']
response = self.client.post(
'/%d/treenode/create' % (self.test_project_id, ),
{'x': 3,
'y': -3,
'z': 2,
'parent_id': parent_id,
'state': make_nocheck_state()})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
child_id = parsed_response['treenode_id']
        # Initially no nodes should be suppressed
response = self.client.get(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id))
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
expected_result = []
self.assertEqual(expected_result, parsed_response)
# Reject attempt to suppress root node
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, parent_id),
{'location_coordinate': 1,
'orientation': 0})
self.assertEqual(response.status_code, 400)
# Reject coordinate outside edge
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id),
{'location_coordinate': 4,
'orientation': 0})
self.assertEqual(response.status_code, 400)
# Create virtual node
response = self.client.post(
'/%d/treenodes/%d/suppressed-virtual/' % (self.test_project_id, child_id),
{'location_coordinate': 2,
'orientation': 0})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
suppressed_id = parsed_response['id']
# Delete virtual node
response = self.client.delete(
'/%d/treenodes/%d/suppressed-virtual/%d' % (self.test_project_id, child_id, suppressed_id))
self.assertEqual(response.status_code, 204)
def test_list_treenode_table_simple(self):
self.fake_authentication()
response = self.client.get(
'/%d/skeletons/%d/node-overview' % (self.test_project_id, 235))
self.assertStatus(response)
expected_result = [[
[417, 415, 5, 4990.0, 4200.0, 0.0, -1.0, 3, 1323093096.0],
[415, 289, 5, 5810.0, 3950.0, 0.0, -1.0, 3, 1323093096.0],
[289, 285, 5, 6210.0, 3480.0, 0.0, -1.0, 3, 1320587496.0],
[285, 283, 5, 6100.0, 2980.0, 0.0, -1.0, 3, 1323006696.0],
[283, 281, 5, 5985.0, 2745.0, 0.0, -1.0, 3, 1323957096.0],
[281, 279, 5, 5675.0, 2635.0, 0.0, -1.0, 3, 1323093096.0],
[279, 267, 5, 5530.0, 2465.0, 0.0, -1.0, 3, 1323093096.0],
[277, 275, 5, 6090.0, 1550.0, 0.0, -1.0, 3, 1323093096.0],
[275, 273, 5, 5800.0, 1560.0, 0.0, -1.0, 3, 1323093096.0],
[273, 271, 5, 5265.0, 1610.0, 0.0, -1.0, 3, 1323093096.0],
[271, 269, 5, 5090.0, 1675.0, 0.0, -1.0, 3, 1323093096.0],
[269, 265, 5, 4820.0, 1900.0, 0.0, -1.0, 3, 1323093096.0],
[267, 265, 5, 5400.0, 2200.0, 0.0, -1.0, 3, 1323093096.0],
[265, 263, 5, 4570.0, 2125.0, 0.0, -1.0, 3, 1323093096.0],
[263, 253, 5, 3915.0, 2105.0, 0.0, -1.0, 3, 1323093096.0],
[261, 259, 5, 2820.0, 1345.0, 0.0, -1.0, 3, 1323093096.0],
[259, 257, 5, 3445.0, 1385.0, 0.0, -1.0, 3, 1323093096.0],
[257, 255, 5, 3825.0, 1480.0, 0.0, -1.0, 3, 1323093096.0],
[255, 253, 5, 3850.0, 1790.0, 0.0, -1.0, 3, 1323093096.0],
[253, 251, 5, 3685.0, 2160.0, 0.0, -1.0, 3, 1323093096.0],
[251, 249, 5, 3380.0, 2330.0, 0.0, -1.0, 3, 1323093096.0],
[249, 247, 5, 2815.0, 2590.0, 0.0, -1.0, 3, 1323093096.0],
[247, 245, 5, 2610.0, 2700.0, 0.0, -1.0, 3, 1323093096.0],
[245, 243, 5, 1970.0, 2595.0, 0.0, -1.0, 3, 1323093096.0],
[243, 241, 5, 1780.0, 2570.0, 0.0, -1.0, 3, 1323093096.0],
[241, 239, 5, 1340.0, 2660.0, 0.0, -1.0, 3, 1323093096.0],
[239, 237, 5, 1135.0, 2800.0, 0.0, -1.0, 3, 1323093096.0],
[237, None, 5, 1065.0, 3035.0, 0.0, -1.0, 3, 1323093096.0]],
[], [[261, 'TODO']]]
parsed_response = json.loads(response.content.decode('utf-8'))
# Check each aaData row instead of everything at once for more granular
        # error reporting. Don't expect the same ordering.
for (expected, parsed) in zip(sorted(expected_result[0]), sorted(parsed_response[0])):
self.assertEqual(expected, parsed)
self.assertEqual(expected_result[1], parsed_response[1])
self.assertEqual(expected_result[2], parsed_response[2])
def test_compact_detail_simple(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'treenode_ids': [261, 417, 415]
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[415, 289, 5810.0, 3950.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[417, 415, 4990.0, 4200.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_label_names_and_treenode_set(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'treenode_ids': [261, 417, 415],
'label_names': ['TODO']
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_label_names(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'label_names': ['TODO']
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[349, 347, 3580.0, 3350.0, 252.0, 5, -1.0, 1, 1323093096.955, 3]
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_label_id_and_treenode_set(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'treenode_ids': [261, 417, 415],
'label_ids': [351]
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_label_ids(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'label_ids': [351]
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[349, 347, 3580.0, 3350.0, 252.0, 5, -1.0, 1, 1323093096.955, 3]
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_skeleton_ids(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [235]
})
self.assertStatus(response)
expected_result = [
[237, None, 1065.0, 3035.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[239, 237, 1135.0, 2800.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[241, 239, 1340.0, 2660.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[243, 241, 1780.0, 2570.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[245, 243, 1970.0, 2595.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[247, 245, 2610.0, 2700.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[249, 247, 2815.0, 2590.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[251, 249, 3380.0, 2330.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[253, 251, 3685.0, 2160.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[255, 253, 3850.0, 1790.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[257, 255, 3825.0, 1480.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[259, 257, 3445.0, 1385.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[263, 253, 3915.0, 2105.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[265, 263, 4570.0, 2125.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[267, 265, 5400.0, 2200.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[269, 265, 4820.0, 1900.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[271, 269, 5090.0, 1675.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[273, 271, 5265.0, 1610.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[275, 273, 5800.0, 1560.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[277, 275, 6090.0, 1550.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[279, 267, 5530.0, 2465.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[281, 279, 5675.0, 2635.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[283, 281, 5985.0, 2745.0, 0.0, 5, -1.0, 235, 1323957096.955, 3],
[285, 283, 6100.0, 2980.0, 0.0, 5, -1.0, 235, 1323006696.955, 3],
[289, 285, 6210.0, 3480.0, 0.0, 5, -1.0, 235, 1320587496.955, 3],
[415, 289, 5810.0, 3950.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
[417, 415, 4990.0, 4200.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_compact_detail_skeleton_ids_and_label(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [235],
'label_names': ['TODO']
})
self.assertStatus(response)
expected_result = [
[261, 259, 2820.0, 1345.0, 0.0, 5, -1.0, 235, 1323093096.955, 3],
]
parsed_response = json.loads(response.content.decode('utf-8'))
self.assertEqual(expected_result,
sorted(parsed_response, key=itemgetter(0)))
def test_non_available_import_user(self):
self.fake_authentication()
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [235],
})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
for tn in parsed_response:
node_response = self.client.get(f'/{self.test_project_id}/treenodes/{tn[0]}/importing-user')
self.assertStatus(response)
parsed_node_response = json.loads(node_response.content.decode('utf-8'))
self.assertEqual(parsed_node_response['importing_user_id'], None)
def test_import_user(self):
self.fake_authentication()
# Get skeleton
url = '/%d/skeleton/235/swc' % (self.test_project_id,)
response = self.client.get(url)
self.assertStatus(response)
orig_swc_string = response.content.decode('utf-8')
# Add permission to import
swc_file = StringIO(orig_swc_string)
assign_perm('can_import', self.test_user, self.test_project)
# Import
response = self.client.post('/%d/skeletons/import' % (self.test_project_id,),
{'file.swc': swc_file, 'name': 'test'})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
new_skeleton_id = parsed_response['skeleton_id']
id_map = parsed_response['node_id_map']
# Get nodes
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [new_skeleton_id],
})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
# Check import user
for tn in parsed_response:
node_response = self.client.get(f'/{self.test_project_id}/treenodes/{tn[0]}/importing-user')
self.assertStatus(response)
parsed_node_response = json.loads(node_response.content.decode('utf-8'))
self.assertEqual(parsed_node_response['importing_user_id'], self.test_user.id)
def test_import_user_permissions(self):
self.fake_authentication()
# Get skeleton
url = '/%d/skeleton/2364/eswc' % (self.test_project_id,)
response = self.client.get(url)
self.assertStatus(response)
orig_eswc_string = response.content.decode('utf-8')
# Add permission to import
eswc_file = StringIO(orig_eswc_string)
assign_perm('can_import', self.test_user, self.test_project)
# Import
response = self.client.post('/%d/skeletons/import' % (self.test_project_id,),
{'file.eswc': eswc_file, 'name': 'test'})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
new_skeleton_id = parsed_response['skeleton_id']
id_map = parsed_response['node_id_map']
# Get nodes
response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [new_skeleton_id],
})
self.assertStatus(response)
parsed_response = json.loads(response.content.decode('utf-8'))
        # For this test to be meaningful, the test user must not be among the
        # skeleton's original node creators.
skeleton_user_ids = [2, 5]
self.assertNotIn(self.test_user.id, skeleton_user_ids)
for tn in parsed_response:
self.assertIn(tn[-1], skeleton_user_ids)
# Check if import user can edit, both all and individually. These
# functions raise an error if they fail.
treenode_ids = [tn[0] for tn in parsed_response]
can_edit_all_or_fail(self.test_user, treenode_ids, 'treenode')
for tn in parsed_response:
can_edit_or_fail(self.test_user, tn[0], 'treenode')
        # To make sure the test user cannot just edit all nodes, test also
# against the original, non-imported version of the skeleton.
original_nodes_response = self.client.post(
f'/{self.test_project_id}/treenodes/compact-detail',
{
'skeleton_ids': [2364],
})
self.assertStatus(original_nodes_response)
original_nodes_parsed_response = json.loads(original_nodes_response.content.decode('utf-8'))
original_treenode_ids = [tn[0] for tn in original_nodes_parsed_response]
with self.assertRaises(PermissionError, msg="Should not be able to edit other user's skeletons."):
can_edit_all_or_fail(self.test_user, original_treenode_ids, 'treenode')
with self.assertRaises(PermissionError, msg="Should not be able to edit other user's skeletons."):
for tn in original_treenode_ids:
can_edit_or_fail(self.test_user, tn, 'treenode')
        # Now test whether a third user, who has no permission over the test
        # user, has permission on the imported nodes. The user shouldn't have
        # access. Then the third user is given permission over the test user
        # and should now also have permission on the imported data.
third_user = User.objects.create(username='Third user')
with self.assertRaises(PermissionError, msg="Should not be able to edit other user's skeletons."):
can_edit_all_or_fail(third_user, treenode_ids, 'treenode')
with self.assertRaises(PermissionError, msg="Should not be able to edit other user's skeletons."):
for tn in treenode_ids:
can_edit_or_fail(third_user, tn, 'treenode')
# Assign permissions to third user on import user (test user)
test_user_group, created = Group.objects.get_or_create(name=self.test_user.username)
test_user_group.user_set.add(third_user)
# Check if the third user can now edit, both all and individually. These
# functions raise an error if they fail.
can_edit_all_or_fail(third_user, treenode_ids, 'treenode')
for tn in parsed_response:
can_edit_or_fail(third_user, tn[0], 'treenode')
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from . import utils
from .role import Role
from .member import Member
from .emoji import Emoji
from .game import Game
from .channel import Channel
from .enums import ServerRegion, Status, try_enum, VerificationLevel
from .mixins import Hashable
class Server(Hashable):
"""Represents a Discord server.
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two servers are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two servers are not equal. |
+-----------+--------------------------------------+
| hash(x) | Returns the server's hash. |
+-----------+--------------------------------------+
| str(x) | Returns the server's name. |
+-----------+--------------------------------------+
Attributes
----------
name : str
The server name.
me : :class:`Member`
Similar to :attr:`Client.user` except an instance of :class:`Member`.
This is essentially used to get the member version of yourself.
roles
A list of :class:`Role` that the server has available.
emojis
A list of :class:`Emoji` that the server owns.
region : :class:`ServerRegion`
The region the server belongs on. There is a chance that the region
will be a ``str`` if the value is not recognised by the enumerator.
afk_timeout : int
The timeout to get sent to the AFK channel.
afk_channel : :class:`Channel`
The channel that denotes the AFK channel. None if it doesn't exist.
members
An iterable of :class:`Member` that are currently on the server.
channels
An iterable of :class:`Channel` that are currently on the server.
icon : str
The server's icon.
id : str
The server's ID.
owner : :class:`Member`
The member who owns the server.
unavailable : bool
Indicates if the server is unavailable. If this is ``True`` then the
reliability of other attributes outside of :meth:`Server.id` is slim and they might
all be None. It is best to not do anything with the server if it is unavailable.
Check the :func:`on_server_unavailable` and :func:`on_server_available` events.
large : bool
Indicates if the server is a 'large' server. A large server is defined as having
more than ``large_threshold`` count members, which for this library is set to
the maximum of 250.
voice_client: Optional[:class:`VoiceClient`]
The VoiceClient associated with this server. A shortcut for the
:meth:`Client.voice_client_in` call.
mfa_level: int
Indicates the server's two factor authorisation level. If this value is 0 then
the server does not require 2FA for their administrative members. If the value is
1 then they do.
verification_level: :class:`VerificationLevel`
The server's verification level.
features: List[str]
A list of features that the server has. They are currently as follows:
- ``VIP_REGIONS``: Server has VIP voice regions
- ``VANITY_URL``: Server has a vanity invite URL (e.g. discord.gg/discord-api)
- ``INVITE_SPLASH``: Server's invite page has a special splash.
splash: str
The server's invite splash.
"""
__slots__ = ['afk_timeout', 'afk_channel', '_members', '_channels', 'icon',
                 'name', 'id', 'owner', 'unavailable', 'region',
'_default_role', '_default_channel', 'roles', '_member_count',
'large', 'owner_id', 'mfa_level', 'emojis', 'features',
'verification_level', 'splash' ]
def __init__(self, **kwargs):
self._channels = {}
self.owner = None
self._members = {}
self._from_data(kwargs)
@property
def channels(self):
return self._channels.values()
def get_channel(self, channel_id):
"""Returns a :class:`Channel` with the given ID. If not found, returns None."""
return self._channels.get(channel_id)
def _add_channel(self, channel):
self._channels[channel.id] = channel
def _remove_channel(self, channel):
self._channels.pop(channel.id, None)
@property
def members(self):
return self._members.values()
def get_member(self, user_id):
"""Returns a :class:`Member` with the given ID. If not found, returns None."""
return self._members.get(user_id)
def _add_member(self, member):
self._members[member.id] = member
def _remove_member(self, member):
self._members.pop(member.id, None)
def __str__(self):
return self.name
def _update_voice_state(self, data):
user_id = data.get('user_id')
member = self.get_member(user_id)
before = None
if member is not None:
before = member._copy()
ch_id = data.get('channel_id')
channel = self.get_channel(ch_id)
member._update_voice_state(voice_channel=channel, **data)
return before, member
def _add_role(self, role):
# roles get added to the bottom (position 1, pos 0 is @everyone)
# so since self.roles has the @everyone role, we can't increment
# its position because it's stuck at position 0. Luckily x += False
# is equivalent to adding 0. So we cast the position to a bool and
# increment it.
for r in self.roles:
r.position += bool(r.position)
self.roles.append(role)
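    # Worked illustration of the trick above: with existing role positions
    # [0, 1, 2], bool(0) is False and leaves @everyone at 0, while bool(1)
    # and bool(2) each add 1, shifting the other roles to 2 and 3 before the
    # new bottom role (position 1) is appended.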
def _remove_role(self, role):
# this raises ValueError if it fails..
self.roles.remove(role)
# since it didn't, we can change the positions now
# basically the same as above except we only decrement
# the position if we're above the role we deleted.
for r in self.roles:
r.position -= r.position > role.position
def _from_data(self, guild):
# according to Stan, this is always available even if the guild is unavailable
# I don't have this guarantee when someone updates the server.
member_count = guild.get('member_count', None)
if member_count:
self._member_count = member_count
self.name = guild.get('name')
self.region = try_enum(ServerRegion, guild.get('region'))
self.verification_level = try_enum(VerificationLevel, guild.get('verification_level'))
self.afk_timeout = guild.get('afk_timeout')
self.icon = guild.get('icon')
self.unavailable = guild.get('unavailable', False)
self.id = guild['id']
self.roles = [Role(server=self, **r) for r in guild.get('roles', [])]
self.mfa_level = guild.get('mfa_level')
self.emojis = [Emoji(server=self, **r) for r in guild.get('emojis', [])]
self.features = guild.get('features', [])
self.splash = guild.get('splash')
for mdata in guild.get('members', []):
roles = [self.default_role]
for role_id in mdata['roles']:
role = utils.find(lambda r: r.id == role_id, self.roles)
if role is not None:
roles.append(role)
mdata['roles'] = sorted(roles)
member = Member(**mdata)
member.server = self
self._add_member(member)
self._sync(guild)
self.large = None if member_count is None else self._member_count >= 250
if 'owner_id' in guild:
self.owner_id = guild['owner_id']
self.owner = self.get_member(self.owner_id)
afk_id = guild.get('afk_channel_id')
self.afk_channel = self.get_channel(afk_id)
for obj in guild.get('voice_states', []):
self._update_voice_state(obj)
def _sync(self, data):
if 'large' in data:
self.large = data['large']
for presence in data.get('presences', []):
user_id = presence['user']['id']
member = self.get_member(user_id)
if member is not None:
member.status = presence['status']
try:
member.status = Status(member.status)
except:
pass
game = presence.get('game', {})
member.game = Game(**game) if game else None
if 'channels' in data:
channels = data['channels']
for c in channels:
channel = Channel(server=self, **c)
self._add_channel(channel)
@utils.cached_slot_property('_default_role')
def default_role(self):
"""Gets the @everyone role that all members have by default."""
return utils.find(lambda r: r.is_everyone, self.roles)
@utils.cached_slot_property('_default_channel')
def default_channel(self):
"""Gets the default :class:`Channel` for the server."""
return utils.find(lambda c: c.is_default, self.channels)
@property
def icon_url(self):
"""Returns the URL version of the server's icon. Returns an empty string if it has no icon."""
if self.icon is None:
return ''
return 'https://cdn.discordapp.com/icons/{0.id}/{0.icon}.jpg'.format(self)
@property
def splash_url(self):
"""Returns the URL version of the server's invite splash. Returns an empty string if it has no splash."""
if self.splash is None:
return ''
return 'https://cdn.discordapp.com/splashes/{0.id}/{0.splash}.jpg?size=2048'.format(self)
@property
def member_count(self):
"""Returns the true member count regardless of it being loaded fully or not."""
return self._member_count
@property
def created_at(self):
"""Returns the server's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def role_hierarchy(self):
"""Returns the server's roles in the order of the hierarchy.
The first element of this list will be the highest role in the
hierarchy.
"""
return sorted(self.roles, reverse=True)
def get_member_named(self, name):
"""Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name : str
The name of the member to lookup with an optional discriminator.
Returns
--------
:class:`Member`
The member in this server with the associated name. If not found
then ``None`` is returned.
"""
result = None
members = self.members
if len(name) > 5 and name[-5] == '#':
# The 5 length is checking to see if #0000 is in the string,
# as a#0000 has a length of 6, the minimum for a potential
# discriminator lookup.
potential_discriminator = name[-4:]
# do the actual lookup and return if found
# if it isn't found then we'll do a full name lookup below.
result = utils.get(members, name=name[:-5], discriminator=potential_discriminator)
if result is not None:
return result
def pred(m):
return m.nick == name or m.name == name
return utils.find(pred, members)
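    # Illustrative lookups (assumes a populated Server instance named `server`):
    #     server.get_member_named('Jake#0001')  # username + 4-digit discriminator
    #     server.get_member_named('Jake')       # plain username or nickname
    # Both return None when no matching member is found.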
|
'''
Copyright (C) 2015 Andreas Esau
[email protected]
Created by Andreas Esau
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
bl_info = {
"name": "COA Tools",
"description": "This Addon provides a Toolset for a 2D Animation Workflow.",
"author": "Andreas Esau",
"version": (1, 0, 4),
"blender": (2, 79, 0),
"location": "View 3D > Tools > Cutout Animation Tools",
"warning": "",
"wiki_url": "https://github.com/ndee85/coa_tools/wiki",
"tracker_url": "https://github.com/ndee85/coa_tools/issues",
"category": "Ndee Tools" }
import bpy
import os
import shutil
import tempfile
from bpy.app.handlers import persistent
from . import addon_updater_ops
# load and reload submodules
##################################
from . import developer_utils
modules = developer_utils.setup_addon_modules(__path__, __name__)
from . ui import *
from . ui import preview_collections
from . operators.pie_menu import preview_collections_pie
from . functions import *
# register
##################################
import traceback
class COAToolsPreferences(bpy.types.AddonPreferences):
bl_idname = __package__
alpha_update_frequency = bpy.props.IntProperty(name="Alpha Update Frequency",default=1,min=1,description="Updates alpha on each x frame.")
show_donate_icon = bpy.props.BoolProperty(name="Show Donate Icon",default=False)
sprite_import_export_scale = bpy.props.FloatProperty(name="Sprite import/export scale",default=0.01)
sprite_thumb_size = bpy.props.IntProperty(name="Sprite thumbnail size",default=48)
json_export = bpy.props.BoolProperty(name="Experimental Json export",default=False)
dragon_bones_export = bpy.props.BoolProperty(name="Dragonbones Export",default=False)
enable_spritesheets = bpy.props.BoolProperty(name="Enable Spritesheets",default=False, description="This feature is deprecated and should not be used for future projects. Use this only for older projects.")
auto_check_update = bpy.props.BoolProperty(
name = "Auto-check for Update",
description = "If enabled, auto-check for updates using an interval",
default = True,
)
updater_intrval_months = bpy.props.IntProperty(
name='Months',
description = "Number of months between checking for updates",
default=0,
min=0
)
updater_intrval_days = bpy.props.IntProperty(
name='Days',
description = "Number of days between checking for updates",
default=1,
min=0,
)
updater_intrval_hours = bpy.props.IntProperty(
name='Hours',
description = "Number of hours between checking for updates",
default=0,
min=0,
max=23
)
updater_intrval_minutes = bpy.props.IntProperty(
name='Minutes',
description = "Number of minutes between checking for updates",
default=0,
min=0,
max=59
)
def draw(self, context):
layout = self.layout
row = layout.row()
row.alignment = "LEFT"
row.prop(self,"enable_spritesheets",text="")
row.prop(self,"enable_spritesheets",icon="ERROR",emboss=False)
#row.label(text="",icon="ERROR")
layout.prop(self, "show_donate_icon")
layout.prop(self,"json_export")
layout.prop(self,"dragon_bones_export")
layout.prop(self,"sprite_import_export_scale")
layout.prop(self,"sprite_thumb_size")
layout.prop(self,"alpha_update_frequency")
addon_updater_ops.update_settings_ui(self,context)
addon_keymaps = []
def register_keymaps():
addon = bpy.context.window_manager.keyconfigs.addon
km = addon.keymaps.new(name = "3D View", space_type = "VIEW_3D")
# insert keymap items here
kmi = km.keymap_items.new("wm.call_menu_pie", type = "F", value = "PRESS")
kmi.properties.name = "view3d.coa_pie_menu"
addon_keymaps.append(km)
def unregister_keymaps():
wm = bpy.context.window_manager
for km in addon_keymaps:
for kmi in km.keymap_items:
km.keymap_items.remove(kmi)
wm.keyconfigs.addon.keymaps.remove(km)
addon_keymaps.clear()
def register():
addon_updater_ops.register(bl_info)
import bpy.utils.previews
pcoll2 = bpy.utils.previews.new()
pcoll2.my_previews = ()
preview_collections["coa_thumbs"] = pcoll2
pcoll = bpy.utils.previews.new()
pcoll.my_previews = ()
my_icons_dir = os.path.join(os.path.dirname(__file__),"icons")
pcoll.load("donate_icon", os.path.join(my_icons_dir,"donate_icon.png"),'IMAGE')
pcoll.load("twitter_icon", os.path.join(my_icons_dir,"twitter_icon.png"),'IMAGE')
pcoll.load("db_icon", os.path.join(my_icons_dir,"db_icon.png"),'IMAGE')
preview_collections["main"] = pcoll
preview_collections_pie["main"] = pcoll
try: bpy.utils.register_module(__name__)
except: traceback.print_exc()
print("Registered {} with {} modules".format(bl_info["name"], len(modules)))
bpy.types.Object.coa_anim_collections = bpy.props.CollectionProperty(type=AnimationCollections)
bpy.types.Object.coa_uv_default_state = bpy.props.CollectionProperty(type=UVData)
bpy.types.Object.coa_slot = bpy.props.CollectionProperty(type=SlotData)
bpy.types.Scene.coa_ticker = bpy.props.IntProperty()
bpy.types.WindowManager.coa_update_uv = bpy.props.BoolProperty(default=False)
kc = bpy.context.window_manager.keyconfigs.addon
if kc:
km = kc.keymaps.new(name="3D View", space_type="VIEW_3D")
kmi = km.keymap_items.new('view3d.move', 'MIDDLEMOUSE', 'PRESS')
kmi.active = False
bpy.app.handlers.frame_change_post.append(update_sprites)
bpy.app.handlers.scene_update_pre.append(scene_update)
bpy.app.handlers.load_post.append(coa_startup)
register_keymaps()
def unregister():
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
try: bpy.utils.unregister_module(__name__)
except: traceback.print_exc()
print("Unregistered {}".format(bl_info["name"]))
bpy.context.window_manager.coa_running_modal = False
bpy.app.handlers.frame_change_post.remove(update_sprites)
bpy.app.handlers.scene_update_pre.remove(scene_update)
bpy.app.handlers.load_post.remove(coa_startup)
unregister_keymaps()
ticker2 = 0
@persistent
def update_sprites(dummy):
global ticker2
ticker2 += 1
update_scene = False
context = bpy.context
objects = []
if hasattr(context,"visible_objects"):
objects = context.visible_objects
else:
objects = bpy.data.objects
alpha_update_frequency = get_addon_prefs(context).alpha_update_frequency
for obj in objects:
if "coa_sprite" in obj and obj.animation_data != None and obj.type == "MESH":
if obj.coa_sprite_frame != obj.coa_sprite_frame_last:
update_uv(bpy.context,obj)
obj.coa_sprite_frame_last = obj.coa_sprite_frame
if obj.coa_slot_index != obj.coa_slot_index_last:
change_slot_mesh_data(context,obj)
obj.coa_slot_index_last = obj.coa_slot_index
if obj.coa_alpha != obj.coa_alpha_last and ticker2%alpha_update_frequency==0:
set_alpha(obj,bpy.context,obj.coa_alpha)
obj.coa_alpha_last = obj.coa_alpha
update_scene = True
if obj.coa_z_value != obj.coa_z_value_last:
set_z_value(context,obj,obj.coa_z_value)
obj.coa_z_value_last = obj.coa_z_value
if obj.coa_modulate_color != obj.coa_modulate_color_last:
set_modulate_color(obj,context,obj.coa_modulate_color)
obj.coa_modulate_color_last = obj.coa_modulate_color
if "sprite_object" in obj:
if obj.coa_flip_direction != obj.coa_flip_direction_last:
set_direction(obj)
obj.coa_flip_direction_last = obj.coa_flip_direction
if update_scene:
bpy.context.scene.update()
### animation wrap mode
if hasattr(context,"active_object"):
sprite_object = get_sprite_object(context.active_object)
if sprite_object != None and sprite_object.coa_animation_loop:
if context.scene.frame_current > context.scene.frame_end:
context.scene.frame_current = context.scene.frame_start
if context.scene.frame_current == context.scene.coa_frame_last and context.scene.frame_current == context.scene.frame_start:
context.scene.frame_current = context.scene.frame_end
context.scene.coa_frame_last = context.scene.frame_current
ticker = 0
@persistent
def scene_update(dummy):
global ticker
ticker += 1
context = bpy.context
if hasattr(context,"visible_objects"):
objects = context.visible_objects
else:
objects = bpy.data.objects
if hasattr(context,"window_manager"):
wm = bpy.context.window_manager
if wm.coa_update_uv:
for obj in objects:
if "coa_sprite" in obj and obj.animation_data != None and obj.type == "MESH":
if obj.coa_sprite_frame != obj.coa_sprite_frame_last:
update_uv(bpy.context,obj)
obj.coa_sprite_frame_last = obj.coa_sprite_frame
if obj.coa_slot_index != obj.coa_slot_index_last:
change_slot_mesh_data(context,obj)
obj.coa_slot_index_last = obj.coa_slot_index
if obj.coa_z_value != obj.coa_z_value_last:
set_z_value(context,obj,obj.coa_z_value)
obj.coa_z_value_last = obj.coa_z_value
if ticker%5 == 0:
if obj.coa_alpha != obj.coa_alpha_last:
set_alpha(obj,bpy.context,obj.coa_alpha)
obj.coa_alpha_last = obj.coa_alpha
if hasattr(bpy.context,"active_object"):
obj = bpy.context.active_object
if obj != None and not obj.coa_sprite_updated and "coa_sprite" in obj:
for thumb in preview_collections["coa_thumbs"]:
preview_collections["coa_thumbs"][thumb].reload()
obj.coa_sprite_updated = True
def hide_base_sprite_version_fix():
for obj in bpy.data.objects:
if obj.type == "MESH":
if "coa_hide_base_sprite" in obj:
obj.data.coa_hide_base_sprite = obj.coa_hide_base_sprite
del(obj["coa_hide_base_sprite"])
def coa_fix_slots():
for obj in bpy.data.objects:
if obj.coa_type == "SLOT":
for slot in obj.coa_slot:
if slot.name in bpy.data.meshes and slot.mesh == None:
slot.mesh = bpy.data.meshes[slot.name]
### start modal operator
def scene_update_callback(scene):
bpy.app.handlers.scene_update_pre.remove(scene_update_callback)
bpy.context.window_manager.coa_running_modal = False
bpy.ops.wm.coa_modal()
if bpy.context.screen.coa_view == "2D":
set_middle_mouse_move(True)
elif bpy.context.screen.coa_view == "3D":
set_middle_mouse_move(False)
@persistent
def coa_startup(dummy):
print("startup coa modal operator")
bpy.app.handlers.scene_update_pre.append(scene_update_callback)
hide_base_sprite_version_fix()
### version fix
coa_fix_slots() ### fix coa_slots to point to mesh data
for obj in bpy.data.objects:
if obj.type == "MESH":
if "sprite" in obj:
obj["coa_sprite"] = True
del obj["sprite"]
if "coa_sprite" in obj:
obj.coa_sprite_updated = False
obj.coa_tiles_changed = True
set_uv_default_coords(bpy.context,obj)
import atexit
### delete thumbs on blender exit
def delete_thumb_previews():
thumb_dir_path = os.path.join(tempfile.gettempdir(),"coa_thumbs")
if os.path.exists(thumb_dir_path):
shutil.rmtree(thumb_dir_path, ignore_errors=True)
atexit.register(delete_thumb_previews)
|
from panda3d.core import *
from panda3d.direct import *
from direct.interval.LerpInterval import LerpPosHprInterval
from otp.nametag.NametagConstants import *
from DistributedNPCToonBase import *
from direct.gui.DirectGui import *
import NPCToons
from toontown.toonbase import TTLocalizer
from toontown.fishing import FishSellGUI
from direct.task.Task import Task
import time
class DistributedNPCFisherman(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.isLocalToon = 0
self.av = None
self.button = None
self.popupInfo = None
self.fishGui = None
self.nextCollision = 0
self.npcType = 'Fisherman'
return
def disable(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupFishGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.popupInfo:
self.popupInfo.destroy()
self.popupInfo = None
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.av = None
if self.isLocalToon:
base.localAvatar.posCamera(0, 0)
DistributedNPCToonBase.disable(self)
return
def generate(self):
DistributedNPCToonBase.generate(self)
self.fishGuiDoneEvent = 'fishGuiDone'
def announceGenerate(self):
DistributedNPCToonBase.announceGenerate(self)
def initToonState(self):
self.setAnimState('neutral', 1.05, None, None)
npcOrigin = self.cr.playGame.hood.loader.geom.find('**/npc_fisherman_origin_%s;+s' % self.posIndex)
print 'fisherman origin: ', npcOrigin
if not npcOrigin.isEmpty():
self.reparentTo(npcOrigin)
self.clearMat()
else:
self.notify.warning('announceGenerate: Could not find npc_fisherman_origin_' + str(self.posIndex))
return
def getCollSphereRadius(self):
return 1.0
def handleCollisionSphereEnter(self, collEntry):
self.currentTime = time.time()
if self.nextCollision > self.currentTime:
self.nextCollision = self.currentTime + 2
else:
base.cr.playGame.getPlace().fsm.request('purchase')
self.sendUpdate('avatarEnter', [])
self.nextCollision = self.currentTime + 2
def __handleUnexpectedExit(self):
self.notify.warning('unexpected exit')
self.av = None
return
def setupAvatars(self, av):
self.ignoreAvatars()
av.stopLookAround()
av.lerpLookAt(Point3(-0.5, 4, 0), time=0.5)
self.stopLookAround()
self.lerpLookAt(Point3(av.getPos(self)), time=0.5)
def resetFisherman(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupFishGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.show()
self.startLookAround()
self.detectAvatars()
self.clearMat()
if self.isLocalToon:
self.freeAvatar()
return Task.done
def setMovie(self, mode, npcId, avId, extraArgs, timestamp):
timeStamp = ClockDelta.globalClockDelta.localElapsedTime(timestamp)
self.remain = NPCToons.CLERK_COUNTDOWN_TIME - timeStamp
self.npcId = npcId
self.isLocalToon = avId == base.localAvatar.doId
if mode == NPCToons.SELL_MOVIE_CLEAR:
return
if mode == NPCToons.SELL_MOVIE_TIMEOUT:
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.isLocalToon:
self.ignore(self.fishGuiDoneEvent)
if self.popupInfo:
self.popupInfo.reparentTo(hidden)
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_START:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
self.notify.warning('Avatar %d not found in doId' % avId)
return
else:
self.accept(self.av.uniqueName('disable'), self.__handleUnexpectedExit)
self.setupAvatars(self.av)
if self.isLocalToon:
camera.wrtReparentTo(render)
quat = Quat()
quat.setHpr((-150, -2, 0))
camera.posQuatInterval(1, Point3(-5, 9, base.localAvatar.getHeight() - 0.5), quat, other=self, blendType='easeOut').start()
if self.isLocalToon:
taskMgr.doMethodLater(1.0, self.popupFishGUI, self.uniqueName('popupFishGUI'))
elif mode == NPCToons.SELL_MOVIE_COMPLETE:
chatStr = TTLocalizer.STOREOWNER_THANKSFISH
self.setChatAbsolute(chatStr, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_TROPHY:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
self.notify.warning('Avatar %d not found in doId' % avId)
return
else:
numFish, totalNumFish = extraArgs
self.setChatAbsolute(TTLocalizer.STOREOWNER_TROPHY % (numFish, totalNumFish), CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_NOFISH:
chatStr = TTLocalizer.STOREOWNER_NOFISH
self.setChatAbsolute(chatStr, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_NO_MONEY:
self.notify.warning('SELL_MOVIE_NO_MONEY should not be called')
self.resetFisherman()
return
def __handleSaleDone(self, sell):
self.ignore(self.fishGuiDoneEvent)
self.sendUpdate('completeSale', [sell])
self.fishGui.destroy()
self.fishGui = None
return
def popupFishGUI(self, task):
self.setChatAbsolute('', CFSpeech)
self.acceptOnce(self.fishGuiDoneEvent, self.__handleSaleDone)
self.fishGui = FishSellGUI.FishSellGUI(self.fishGuiDoneEvent)
|
from icalendar import Calendar
JSON_EVENT = "{{ title: '{} ({})', start:'2013-09-{} {}:00', end:'2013-09-{} {}:00', allDay: false}},\n"
def prettyCourseName(course):
course = course[:-3]
if course[-4] == " ":
return course
else:
return course[0:4] + " " + course[4:7]
def weekdays(day):
daylist = {'MO':'02', 'TU':'03', 'WE':'04', 'TH':'05', 'FR':'06', 'SA':'07', 'SU':'08'}
return daylist[day]
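# Illustrative behaviour of the two helpers above (hypothetical course
# strings, not taken from a real calendar export):
#     prettyCourseName("MATH101 PO")   # -> "MATH 101" (department glued to number)
#     prettyCourseName("MATH 101 PO")  # -> "MATH 101" (already spaced, returned as-is)
#     weekdays("MO")                   # -> "02", the fixed 2013-09 day used by JSON_EVENT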
def shared(dictionary):
all_list = []
classtimes = {}
classdays = {}
for name in iter(dictionary):
temp_list = [name]
cal = dictionary[name]
cal = Calendar.from_ical(cal)
for component in cal.walk():
if component.name == "VEVENT":
course_name = component.get("SUMMARY")
course_name = str(course_name)
if course_name not in temp_list and "PREC" not in course_name:
temp_list.append(course_name)
day_of_week = str(component.get("RRULE")['BYDAY'][0])
start_time = component.get('DTSTART').dt
start_time = str(start_time)[11:16]
end_time = component.get('DTEND').dt
end_time = str(end_time)[11:16]
if course_name not in classtimes:
classtimes[course_name] = [start_time, end_time]
classdays.setdefault(course_name, []).append(day_of_week)
all_list.append(temp_list)
for course in classdays:
t = classdays[course]
t = list(set(t))
classdays[course] = t
students = {}
for course_list in all_list:
for course in course_list[1:]:
students.setdefault(course, []).append(course_list[0])
final = ""
for course in students:
if len(students[course]) > 1:
for day in classdays[course]:
final += JSON_EVENT.format(
prettyCourseName(course),
", ".join(students[course]),
weekdays(day),
classtimes[course][0],
weekdays(day),
classtimes[course][1])
return final[:-2]
|
# Copyright 2012-2015 Mattias Fliesberg
#
# This file is part of opmuse.
#
# opmuse is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opmuse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with opmuse. If not, see <http://www.gnu.org/licenses/>.
import datetime
import cherrypy
from sqlalchemy.orm import joinedload, undefer
from sqlalchemy import func
from opmuse.library import Album, Track, library_dao
from opmuse.security import User, security_dao
from opmuse.database import get_database
from opmuse.remotes import remotes
from opmuse.queues import queue_dao
from opmuse.search import search
from opmuse.cache import cache
from opmuse.ws import ws
from opmuse.bgtask import NonUniqueQueueError
class Dashboard:
RECENT_TRACK_CACHE_KEY = "dashboard_get_recent_tracks"
RECENT_TRACK_CACHE_AGE = 1200 # 20min
def __init__(self):
cherrypy.engine.subscribe('transcoding.start', self.transcoding_start)
cherrypy.engine.subscribe('transcoding.end', self.transcoding_end)
def transcoding_start(self, transcoder, track):
ws.emit_all('dashboard.listening_now.update')
def transcoding_end(self, track, transcoder):
Dashboard.update_recent_tracks()
ws.emit_all('dashboard.listening_now.update')
@cherrypy.expose
@cherrypy.tools.authenticated(needs_auth=True)
@cherrypy.tools.jinja(filename='dashboard/index.html')
def default(self):
users = []
for user in (get_database()
.query(User)
.order_by(User.active.desc(), User.login)
.filter(User.id != cherrypy.request.user.id)
.limit(10).all()):
remotes.update_user(user)
remotes_user = remotes.get_user(user)
users.append({
'remotes_user': remotes_user,
'user': user,
'playing_track': queue_dao.get_playing_track(user.id)
})
remotes.update_user(cherrypy.request.user)
remotes_user = remotes.get_user(cherrypy.request.user)
current_user = {
'user': cherrypy.request.user,
'playing_track': queue_dao.get_playing_track(cherrypy.request.user.id),
'remotes_user': remotes_user,
}
all_users = users + [current_user]
new_albums = self.get_new_albums(12, 0)
top_artists = None
Dashboard.update_recent_tracks()
top_artists = Dashboard.get_top_artists()
if top_artists is not None:
top_artists = top_artists[0:18]
recently_listeneds = Dashboard.get_recently_listeneds()
return {
'all_users': all_users,
'current_user': current_user,
'users': users,
'top_artists': top_artists,
'recently_listeneds': recently_listeneds,
'new_albums': new_albums
}
@staticmethod
def get_recently_listeneds(by_user=None):
all_recent_tracks = Dashboard.get_recent_tracks()
if all_recent_tracks is None:
return None
recently_listeneds = []
last_recent_track = last_recently_listened = None
count = 0
for recent_track in all_recent_tracks:
if by_user is not None and by_user.id != recent_track['user_id']:
continue
if recent_track['track_id'] is not None:
recent_track['track'] = library_dao.get_track(recent_track['track_id'])
else:
recent_track['track'] = None
if recent_track['artist_id'] is not None:
recent_track['artist'] = library_dao.get_artist(recent_track['artist_id'])
else:
recent_track['artist'] = None
recent_track['user'] = security_dao.get_user(recent_track['user_id'])
recently_listened = None
track = recent_track['track']
user = recent_track['user']
last_track = last_recent_track['track'] if last_recent_track is not None else None
if track is not None and track.album is not None:
if (last_track is None or
last_track.album is None or
last_track.album.id != track.album.id):
recently_listened = {
'entity': track.album,
'tracks': [track],
'users': set([user]),
'plays': 1
}
elif last_recently_listened is not None:
last_recently_listened['users'].add(user)
last_recently_listened['tracks'].append(track)
last_recently_listened['plays'] += 1
elif track is not None and track.album is None:
recently_listened = {
'entity': track,
'users': [user],
}
elif track is None:
recently_listened = {
'entity': recent_track,
'users': [user]
}
if recently_listened is not None:
recently_listeneds.append(recently_listened)
last_recently_listened = recently_listened
count += 1
if count > 20:
break
last_recent_track = recent_track
return recently_listeneds
def get_new_albums(self, limit, offset):
return (get_database()
.query(Album)
.options(joinedload(Album.tracks))
.options(undefer(Album.artist_count))
.join(Track, Album.id == Track.album_id)
.group_by(Album.id)
.order_by(func.max(Track.created).desc())
.limit(limit)
.offset(offset)
.all())
@staticmethod
def get_top_artists():
all_recent_tracks = Dashboard.get_recent_tracks()
if all_recent_tracks is None:
return None
top_artists = {}
for recent_track in all_recent_tracks:
if recent_track['artist_id'] is not None:
recent_track['artist'] = library_dao.get_artist(recent_track['artist_id'])
else:
recent_track['artist'] = None
if recent_track['artist'] is not None:
if recent_track['artist'] not in top_artists:
top_artists[recent_track['artist']] = 1
else:
top_artists[recent_track['artist']] += 1
result = []
for artist, count in sorted(top_artists.items(), key=lambda x: x[1], reverse=True):
result.append({
'artist': artist,
'count': count
})
return result
@staticmethod
def get_recent_tracks():
cache_key = Dashboard.RECENT_TRACK_CACHE_KEY
if cache.has(cache_key):
return cache.get(cache_key)
else:
return None
@staticmethod
def update_recent_tracks():
cache_key = Dashboard.RECENT_TRACK_CACHE_KEY
cache_age = Dashboard.RECENT_TRACK_CACHE_AGE
if cache.needs_update(cache_key, age=cache_age):
cache.keep(cache_key)
try:
cherrypy.engine.bgtask.put_unique(Dashboard._fetch_recent_tracks, 9)
except NonUniqueQueueError:
pass
@staticmethod
def _fetch_recent_tracks():
"""
Look up all listened tracks 4 weeks back in whoosh/search.
"""
now = datetime.datetime.now()
timestamp = int((now - datetime.timedelta(weeks=4)).timestamp())
listened_tracks = library_dao.get_listened_tracks_by_timestmap(timestamp)
recent_tracks = []
for listened_track in listened_tracks:
results = search.get_results_artist(listened_track.artist_name, exact=True)
results = sorted(results, key=lambda result: result[1], reverse=True)
track_id = artist_id = None
if len(results) > 0:
artist_id = results[0][0]
tracks = search.query_track(listened_track.name, exact=True)
if len(tracks) > 0:
for track in tracks:
if track.artist.id == artist_id:
track_id = track.id
recent_tracks.append({
'artist_id': artist_id,
'track_id': track_id,
'artist_name': listened_track.artist_name,
'name': listened_track.name,
'timestamp': listened_track.timestamp,
'user_id': listened_track.user.id
})
cache.set(Dashboard.RECENT_TRACK_CACHE_KEY, recent_tracks)
ws.emit_all('dashboard.recent_tracks.fetched')
_fetch_recent_tracks.bgtask_name = "Fetch recent tracks for dashboard"
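    # Illustrative shape of one cached entry written by _fetch_recent_tracks
    # (the values are made up; the keys mirror the dict literal built above):
    #     {'artist_id': 42, 'track_id': 1337, 'artist_name': 'Some Artist',
    #      'name': 'Some Track', 'timestamp': 1430000000, 'user_id': 1}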
|
# Copyright 2017 Become Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for
Service : AdGroupService
Operation: mutate (SET)
API Reference: https://github.com/yahoojp-marketing/sponsored-search-api-documents/blob/201901/docs/en/api_reference/services/AdGroupService.md
Generated by 'api_reference_example_generator.py' using code template 'examples/sample_template.py.template'
"""
import logging
import json
from yahooads import promotionalads
logging.basicConfig(level=logging.INFO)
# logging.getLogger('suds.client').setLevel(logging.DEBUG)
# logging.getLogger('suds.transport').setLevel(logging.DEBUG)
SERVICE = 'AdGroupService'
OPERATION = 'mutate (SET)'
OPERAND = {
"operator": "SET",
"accountId": "SAMPLE-ACCOUNT-ID",
"operand": {
"campaignId": "SAMPLE-CAMPAIN-ID",
"adGroupId": "SAMPLE-ADGROUP-ID",
"adGroupName": "set test adGroup.",
"userStatus": "PAUSED",
"bid": {
"maxCpc": "10000",
"bidSource": "ADGROUP"
},
"settings": {
"xsi_type": "TargetingSetting",
"criterionType": "TARGET_LIST",
"targetAll": "DEACTIVE"
},
"adGroupAdRotationMode": {
"adRotationMode": "OPTIMIZE"
}
}
}
"""
SAMPLE RESPONSE = {
"rval": {
"ListReturnValue.Type": "AdGroupReturnValue",
"Operation.Type": "SET",
"values": {
"operationSucceeded": "true",
"adGroup": {
"accountId": "SAMPLE-ACCOUNT-ID",
"campaignId": "SAMPLE-CAMPAIN-ID",
"campaignTrackId": "100000001",
"campaignName": "test campaign.",
"adGroupId": "SAMPLE-ADGROUP-ID",
"adGroupTrackId": "0",
"adGroupName": "test adGroup.",
"userStatus": "ACTIVE",
"bid": {
"maxCpc": "10000",
"bidSource": "ADGROUP"
},
"settings": {
"xsi_type": "TargetingSetting",
"criterionType": "TARGET_LIST",
"targetAll": "ACTIVE"
},
"trackingUrl": "http://yahoo.co.jp?url={lpurl}&c={campaignid}&g={adgroupid}&a={creative}&type={_site}&pid={_id1}&vid={_id2}",
"customParameters": {
"parameters": [
{
"key": "site",
"value": "yahoo"
},
{
"key": "id1",
"value": "1234"
},
{
"key": "id2",
"value": "a7h59A98yu"
}
]
},
"urlReviewData": {
"urlApprovalStatus": "APPROVED"
},
"adGroupAdRotationMode": {
"adRotationMode": "OPTIMIZE"
}
}
}
}
}
"""
def main():
client = promotionalads.PromotionalAdsClient.LoadFromConfiguration()
service = client.GetService(SERVICE)
print("REQUEST : {}.{}\n{}".format(SERVICE, OPERATION, json.dumps(OPERAND, indent=2)))
try:
if OPERATION == "get":
response = service.get(OPERAND)
elif OPERATION.startswith("get"):
get_method = getattr(service, OPERATION)
response = get_method(OPERAND)
elif OPERATION.startswith("mutate"):
response = service.mutate(OPERAND)
else:
raise("Unknown Operation '{}'".format(OPERATION))
print("RESPONSE :\n{}".format(response))
except Exception as e:
print("Exception at '{}' operations \n{}".format(SERVICE, e))
raise e
if __name__ == '__main__':
main()
|
from django import forms
from django.forms import widgets
from aspc.college.models import Building
from aspc.housing.models import Review, Room, Suite
from aspc.housing.forms.widgets import ColumnCheckboxSelectMultiple, RatingRadioFieldRenderer
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
rating_widgets = {
'quiet': widgets.RadioSelect(renderer=RatingRadioFieldRenderer),
'spacious': widgets.RadioSelect(renderer=RatingRadioFieldRenderer),
'temperate': widgets.RadioSelect(renderer=RatingRadioFieldRenderer),
'maintained': widgets.RadioSelect(renderer=RatingRadioFieldRenderer),
'cellphone': widgets.RadioSelect(renderer=RatingRadioFieldRenderer),
'best': widgets.Textarea(attrs={'rows':5, 'cols':60,}),
'worst': widgets.Textarea(attrs={'rows':5, 'cols':60,}),
'comments': widgets.Textarea(attrs={'rows':5, 'cols':60,}),
}
class NewReviewForm(forms.ModelForm):
building = forms.ModelChoiceField(queryset=Building.objects.filter(type=Building.TYPES_LOOKUP['Dormitory']).order_by('name'))
room_number = forms.CharField()
def clean(self):
building = self.cleaned_data.get('building')
room_number = self.cleaned_data.get('room_number')
try:
room = Room.objects.get(floor__building=building, number=room_number)
except Room.DoesNotExist:
raise ValidationError("No matching room found")
self.cleaned_data['room'] = room
return self.cleaned_data
def save(self, *args, **kwargs):
# keep passed-in commit arg, default True
commit = kwargs.get('commit', True)
# don't save to db at first so we can populate instance.room
kwargs['commit'] = False
instance = super(NewReviewForm, self).save(*args, **kwargs)
instance.room = self.cleaned_data['room']
if commit:
instance.save()
return instance
class Meta:
model = Review
exclude = ('create_ts', 'room')
widgets = rating_widgets
class ReviewRoomForm(forms.ModelForm):
class Meta:
model = Review
exclude = ('create_ts', 'room')
widgets = rating_widgets
SEARCH_ORDERING = (
(('average_rating',), "highest rated"),
(('size', 'average_rating_spacious'), "largest"),
(('average_rating_quiet',), "quietest"),
(('average_rating_temperate',), "most temperate"),
(('average_rating_maintained',), "best condition"),
(('average_rating_cellphone',), "best cell reception"),
)
ORDERING_CHOICES = tuple(enumerate((a[1] for a in SEARCH_ORDERING)))
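# For reference, ORDERING_CHOICES expands to ((0, 'highest rated'),
# (1, 'largest'), (2, 'quietest'), (3, 'most temperate'),
# (4, 'best condition'), (5, 'best cell reception')), i.e. each index into
# SEARCH_ORDERING paired with its human-readable label.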
class SearchForm(forms.Form):
prefer = forms.TypedChoiceField(choices=ORDERING_CHOICES, coerce=int, empty_value=(), help_text="rooms first")
buildings = forms.ModelMultipleChoiceField(
queryset=Building.objects.filter(type=Building.TYPES_LOOKUP['Dormitory']),
required=False,
widget=ColumnCheckboxSelectMultiple(columns=3, css_class="col buildings"),
)
size = forms.FloatField(required=False, help_text="square feet or larger")
occupancy = forms.TypedMultipleChoiceField(
choices=Room.OCCUPANCY_TYPES,
required=False,
widget=ColumnCheckboxSelectMultiple(columns=2, css_class="col"),
coerce=int,
empty_value=(),
)
class RefineForm(forms.Form):
prefer = forms.TypedChoiceField(choices=ORDERING_CHOICES, coerce=int, empty_value=(), help_text="rooms first")
buildings = forms.ModelMultipleChoiceField(
queryset=Building.objects.filter(type=Building.TYPES_LOOKUP['Dormitory']),
required=False,
widget=ColumnCheckboxSelectMultiple(columns=2, css_class="col buildings"),
)
size = forms.FloatField(required=False, help_text=mark_safe("ft<sup>2</sup> or larger"))
occupancy = forms.TypedMultipleChoiceField(
choices=Room.OCCUPANCY_TYPES,
required=False,
widget=ColumnCheckboxSelectMultiple(columns=2, css_class="col"),
coerce=int,
empty_value=(),
)
class RaffleForm(forms.Form):
start_date = forms.DateField(widget=widgets.DateInput(attrs={
'placeholder': 'YYYY-MM-DD',
}))
end_date = forms.DateField(widget=widgets.DateInput(attrs={
'placeholder': 'YYYY-MM-DD',
}))
num_winners = forms.IntegerField(initial=1, min_value=1)
|
import importlib
import traceback
import logging
from aiohttp_session import get_session
from aiohttp import web
from brome.core import exceptions
from brome.webserver.server.server_decorator import (
require,
exception_handler,
csrf_protected
)
from brome.webserver.server.auth import get_user_from_session
logger = logging.getLogger('bromewebserver')
class CRUD(web.View):
action_list = [
'create',
'read',
'update',
'delete'
]
async def get_json_data(self):
try:
req_data = await self.request.json()
actions = req_data['actions']
# Convert actions to list if necessary
if not type(actions) == list:
actions = [actions]
logger.debug('actions = {actions}'.format(
actions=actions
))
return actions
except:
raise exceptions.InvalidRequestException(
'No json send or missing actions parameters'
)
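    # Illustrative request body accepted by get_json_data/post (the model name
    # and field values are hypothetical; only the structure is implied by the
    # parsing code below):
    #     {"actions": [
    #         {"action": "read", "model": "user", "limit": 10, "skip": 0},
    #         {"action": "update", "model": "user", "uid": "<mongo id>",
    #          "data": {"name": "new name"}}
    #     ]}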
def import_model(self, model):
try:
m = importlib.import_module(
'brome.model.{model}'.format(model=model)
)
return getattr(m, model.title())
except ImportError:
raise exceptions.ModelImportException(
'{model} not found'.format(model=model)
)
def trim_response_data(self, success, response_data, error):
if len(response_data) == 1:
return response_data[0]
else:
data = {'success': success, 'results': response_data}
if error:
data['error'] = error
return data
@exception_handler()
@csrf_protected()
@require('login')
async def post(self):
actions = await self.get_json_data()
session = await get_session(self.request)
author = get_user_from_session(session, self.request.db_session)
read_context = {
'author': author,
'db_session': self.request.db_session,
'ws_session': session,
'method': 'read',
'queue': self.request.app.queue,
}
action_context = {
'author': author,
'db_session': self.request.db_session,
'ws_session': session,
'queue': self.request.app.queue
}
success = True
response_data = []
error = []
for index, action in enumerate(actions):
try:
# RESPONSE
response_data.append(dict())
response_data[index]['success'] = True
response_data[index]['results'] = []
# ACTION
action_name = action.get('action')
if not action_name:
raise exceptions.InvalidRequestException(
'Missing action in actions[{index}]'
.format(
index=index
)
)
elif action_name not in self.action_list:
raise exceptions.InvalidRequestException(
'Invalid action name: "{action_name}"'
.format(
action_name=action_name
)
)
action_context['method'] = action_name
# IMPORT MODEL
model_name = action.get('model')
if not model_name:
raise exceptions.InvalidRequestException(
'Missing model in action'
)
model_class = self.import_model(model_name)
# CREATE
if action_name == 'create':
results = [model_class()]
response_data[index]['total'] = 1
# QUERY
else:
# READ SPECIFIC RECORD
uid = action.get('uid')
if uid:
base_query = self.request.db_session\
.query(model_class)\
.filter(model_class.mongo_id == uid)
# BATCH
elif action.get('uids'):
uids = action.get('uids')
base_query = self.request.db_session.query(model_class)
response_data[index]['total'] = len(uids)
base_query = base_query.in_('mongo_id', *uids)
results = base_query.all()
else:
filters = action.get('filters')
limit = action.get('limit')
skip = action.get('skip')
descending = action.get('descending')
ascending = action.get('ascending')
base_query = self.request.db_session.query(model_class)
if limit:
base_query = base_query.limit(limit)
if skip:
base_query = base_query.skip(skip)
if descending:
base_query = base_query.descending(descending)
if ascending:
base_query = base_query.ascending(ascending)
if filters:
if 'uid' in filters:
filters['mongo_id'] = filters['uid']
del filters['uid']
base_query = base_query.filter_by(**filters)
response_data[index]['total'] = base_query.count()
results = base_query.all()
# PROCESSING RESULTS
for result in results:
# AUTHORIZATION CHECK
logger.debug(
'action_context = {action_context}'
.format(
action_context=action_context
)
)
if not await result.method_autorized(action_context):
raise exceptions.NotAuthorizedException(
'{author} not authorized to {action_name} {result}'
.format(
author=author,
action_name=action_name,
result=result
)
)
# APPLY ACTION
# CREATE & UPDATE
if action_name in ['create', 'update']:
data = action.get('data')
if not data:
raise exceptions.InvalidRequestException(
'Missing data in action'
)
action_context['data'] = data
sane_data = await result.sanitize_data(
action_context
)
action_context['data'] = sane_data
# BEFORE HOOK
await getattr(
result,
'before_{action_name}'
.format(
action_name=action_name
)
)(action_context)
await result.validate_and_save(action_context)
# AFTER HOOK
await getattr(
result,
'after_{action_name}'
.format(
action_name=action_name
)
)(action_context)
# DELETE
elif action_name == 'delete':
await result.before_delete(action_context)
self.request.db_session.remove(result, safe=True)
await result.after_delete(action_context)
if not action.get('total_only', False) \
and not action_name == 'delete':
# READ
# NOTE the authorization check has already
# been performed for the read
if not action_name == 'read':
logger.debug(
'read_context = {read_context}'
.format(
read_context=read_context
)
)
if not await result.method_autorized(read_context):
raise exceptions.NotAuthorizedException(
'{author} not authorized to {action_name} {result}' # noqa
.format(
author=author,
action_name=read_context.get('method'),
result=result
)
)
response_data[index]['results'].append(
await result.serialize(read_context)
)
except Exception as e:
success = False
tb = traceback.format_exc()
logger.error(
'Request HandledException<{exception}>'
.format(exception=str(tb))
)
if isinstance(e, exceptions.ServerBaseException):
error_msg = e.get_name()
else:
error_msg = 'ServerSideError'
response_data[index] = {
'success': False,
'error': error_msg
}
error.append(error_msg)
# RESPONSE
trimmed_response_data = self.trim_response_data(
success,
response_data,
error
)
return web.json_response(trimmed_response_data)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0007_auto_20161002_1715'),
]
operations = [
migrations.CreateModel(
name='FoodFest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField()),
('desc', models.CharField(null=True, max_length=200, blank=True)),
('link', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='adminuploads/sponsors/')),
],
),
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField()),
('desc', models.CharField(null=True, max_length=200, blank=True)),
('link', models.CharField(max_length=100)),
('img', models.ImageField(upload_to='adminuploads/sponsors/')),
('types', models.CharField(choices=[('Our Sponsors', 'Our Sponsors'), ('Our Media Partners', 'Our Media Partners'), ('Our Associations', 'Our Associations')], max_length=100)),
],
),
migrations.RenameField(
model_name='event',
old_name='event_category',
new_name='category',
),
migrations.RenameField(
model_name='event',
old_name='eventDate',
new_name='date',
),
migrations.RenameField(
model_name='event',
old_name='event_desc',
new_name='desc',
),
migrations.RenameField(
model_name='event',
old_name='eventpic',
new_name='img',
),
migrations.RenameField(
model_name='event',
old_name='eventName',
new_name='name',
),
migrations.RenameField(
model_name='event',
old_name='eventRules',
new_name='rules',
),
migrations.RenameField(
model_name='event',
old_name='event_type',
new_name='types',
),
migrations.RenameField(
model_name='rounds',
old_name='roundDay',
new_name='day',
),
migrations.RenameField(
model_name='rounds',
old_name='roundLocation',
new_name='location',
),
migrations.RenameField(
model_name='rounds',
old_name='roundTime',
new_name='time',
),
migrations.RenameField(
model_name='rounds',
old_name='roundTitle',
new_name='title',
),
]
|
#!/usr/bin/python3
"""Creates DOT graph to visualize ninja build timing.
Given a ninja build directory, this program uses the tools from
https://github.com/catapult-project/catapult.git
https://github.com/nico/ninjatracing.git
to post-process the ".ninja_log" file into an execution trace that can
then be plotted and viewed on a web browser.
"""
import getopt
import os
import sys
import script_utils as u
# Output html file
flag_outfile = "trace.html"
# Path to "ninjatracing" repo
flag_ninjatracing = "/ssd2/ninjatracing/ninjatracing"
# Path to "catapult" repo
flag_catapult = "/ssd2/catapult"
# Dryrun, echo flags
flag_echo = False
flag_dryrun = False
def perform():
"""Main driver routine."""
# Check for existence of ninja log file
if not os.path.exists(".ninja_log"):
u.error("unable to access .ninja_log file")
# Generate json file from ninja log
if flag_dryrun or flag_echo:
u.verbose(0, "%s .ninja_log > trace.json" % flag_ninjatracing)
if not flag_dryrun:
u.docmdout("%s .ninja_log" % flag_ninjatracing, "trace.json")
# Generate trace.html file from json
cmd = ("%s/tracing/bin/trace2html trace.json "
"--output=%s" % (flag_catapult, flag_outfile))
if flag_dryrun or flag_echo:
u.verbose(0, cmd)
if not flag_dryrun:
u.docmd(cmd)
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options] [-o output]
options:
-d increase debug msg verbosity level
-D dry run mode (echo commands but do not execute)
-o F write output HTML to file F
-C C use catapult repo in directory C
-N N use ninjatracing repo in directory N
Default output file (if -o option not used) is 'trace.html'.
""" % os.path.basename(sys.argv[0]))
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_outfile, flag_echo, flag_dryrun
global flag_ninjatracing, flag_catapult
try:
optlist, args = getopt.getopt(sys.argv[1:], "edDo:N:C:")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
if args:
usage("extra unknown arguments")
for opt, arg in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-D":
flag_dryrun = True
elif opt == "-e":
flag_echo = True
elif opt == "-N":
flag_ninjatracing = arg
elif opt == "-C":
flag_catapult = arg
elif opt == "-o":
flag_outfile = arg
# Setup
u.setdeflanglocale()
parse_args()
perform()
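# Illustrative invocation from a ninja build directory (the script name and
# repository locations below are hypothetical):
#     python3 ninja_trace.py -o build-trace.html \
#         -N ~/src/ninjatracing/ninjatracing -C ~/src/catapult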
|
import DeepFried2 as df
from .. import dfext
def mknet():
net = df.Sequential(
# -> 128x48
df.SpatialConvolutionCUDNN(3, 128, (7,7), border='same', bias=None, init=df.init.prelu()),
df.BatchNormalization(128, 0.95), df.ReLU(),
dfext.nextblock_b(128, cardin=16, chan_mid=4),
df.PoolingCUDNN((2,2)), # -> 64x24
dfext.nextblock_b(128, cardin=16, chan_mid=4),
dfext.nextblock_b(128, cardin=16, chan_mid=4),
dfext.nextblock_b(128, cardin=16, chan_mid=4, chan_out=256),
df.PoolingCUDNN((2,2)), # -> 32x12
dfext.nextblock_b(256, cardin=16, chan_mid=8),
dfext.nextblock_b(256, cardin=16, chan_mid=8),
df.PoolingCUDNN((2,2)), # -> 16x6
dfext.nextblock_b(256, cardin=16, chan_mid=8),
dfext.nextblock_b(256, cardin=16, chan_mid=8),
dfext.nextblock_b(256, cardin=16, chan_mid=8, chan_out=512),
df.PoolingCUDNN((2,2)), # -> 8x3
dfext.nextblock_b(512, cardin=16, chan_mid=16),
dfext.nextblock_b(512, cardin=16, chan_mid=16),
df.PoolingCUDNN((8,3), mode='avg'),
df.SpatialConvolutionCUDNN(512, 256, (1,1), bias=None, init=df.init.prelu()),
df.BatchNormalization(256, 0.95), df.ReLU(),
df.StoreOut(df.SpatialConvolutionCUDNN(256, 128, (1,1)))
)
net.emb_mod = net[-1]
net.in_shape = (128, 48)
net.scale_factor = None # TODO
print("Net has {:.2f}M params".format(df.utils.count_params(net)/1000/1000), flush=True)
return net
|
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def loadDataMooreProblem():
# some numbers show up as 1,170,000,000 (commas)
# some numbers have references in square brackets after them
X = []
Y = []
non_decimal = re.compile(r'[^\d]+')
for line in open('moore.csv'):
r = line.split('\t')
x = int(non_decimal.sub('', r[2].split('[')[0]))
y = int(non_decimal.sub('', r[1].split('[')[0]))
X.append(x)
Y.append(y)
X = np.array(X)
Y = np.array(Y)
plt.scatter(X, Y)
plt.show()
    # Transform exponential to linear representation (exp to log)
Y = np.log(Y)
return X, Y
def loadDataWeightsProblem():
data = pd.read_csv("brain-vs-body-weights.txt", names=["index", "brainWeight", "bodyWeight"], delimiter=",", skiprows=33)
X_df = pd.DataFrame(data.brainWeight)
Y_df = pd.DataFrame(data.bodyWeight)
plt.scatter(X_df, Y_df)
plt.show()
X = np.array(X_df).T.flatten()
Y = np.array(Y_df).T.flatten()
return X, Y
def loadDataFoodTruckProfitsProblem():
data = pd.read_csv("foodtruck-profits.txt", names=["unitsSold", "profit"], delimiter=",")
X_df = pd.DataFrame(data.unitsSold)
Y_df = pd.DataFrame(data.profit)
plt.scatter(X_df, Y_df)
plt.show()
X = np.array(X_df).T.flatten()
Y = np.array(Y_df).T.flatten()
return X, Y
def plotDataAndPrediction(X,Y,Yhat):
plt.scatter(X, Y)
plt.plot(X, Yhat)
plt.show()
## yHat = ax + b
def calculateCoefficients(X, Y):
denominator = X.dot(X) - X.mean() * X.sum()
a = ( X.dot(Y) - Y.mean()*X.sum() ) / denominator
b = ( Y.mean() * X.dot(X) - X.mean() * X.dot(Y) ) / denominator
return a, b
def calculateYhat(a, b, X):
Yhat = a*X + b
return Yhat
def calculateRSquared(Y, Yhat):
d1 = Y - Yhat
d2 = Y - Y.mean()
r2 = 1 - d1.dot(d1) / d2.dot(d2)
return r2
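# A minimal usage sketch of the least-squares helpers above, run on synthetic
# data (the values below are invented purely for illustration).
if __name__ == "__main__":
    X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    Y = 2.0 * X + 1.0
    a, b = calculateCoefficients(X, Y)  # expect a ~= 2.0 and b ~= 1.0
    Yhat = calculateYhat(a, b, X)
    print("a =", a, "b =", b, "r^2 =", calculateRSquared(Y, Yhat))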
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class Group(object):
''' a group of ansible hosts '''
''' base class for an inventory Group '''
# use __slots__ to restrict which attributes instances of this class may have
__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
'''
Group base class, with the following attributes:
depth: depth of the group; probed and updated whenever a child group is added
name: the group name
hosts: the list of host objects belonging to this group
vars: the group variables
child_groups: child groups; groups may be nested and children may have children of their own. When a child group is added, the child is registered on the parent and the parent is registered on the child.
parent_groups: parent groups, i.e. the groups this group is a child of; kept in sync when child groups are added.
_hosts_cache: used to cache the host data
'''
self.depth = 0
self.name = name
self.hosts = []
self.vars = {}
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
#self.clear_hosts_cache()
if self.name is None:
raise Exception("group name is required")
def add_child_group(self, group):
'''
Add a child group to the current group: probe and update the child group's depth (and that of its descendants), and register this group as a parent of the child.
'''
if self == group:
raise Exception("can't add group to itself")
# don't add if it's already there
if group not in self.child_groups:
self.child_groups.append(group)
# update the depth of the child
group.depth = max([self.depth+1, group.depth])
# update the depth of the grandchildren
group._check_children_depth()
# now add self to child's parent_groups list, but only if there
# isn't already a group with the same name
# after adding the child group here, the child must also record this group as a parent
if self.name not in [g.name for g in group.parent_groups]:
group.parent_groups.append(self)
self.clear_hosts_cache() # clear the cached hosts
def _check_children_depth(self):
for group in self.child_groups:
group.depth = max([self.depth+1, group.depth])
group._check_children_depth()
def add_host(self, host):
''' Add a new host object to this group and record on the host which group it belongs to; via child groups, a host may belong to several groups. '''
self.hosts.append(host)
host.add_group(self)
self.clear_hosts_cache()
def set_variable(self, key, value):
# set a group variable
self.vars[key] = value
def clear_hosts_cache(self):
# clear the host cache, and the host caches of all parent groups as well, so a host change in a child group is reflected in its parents
self._hosts_cache = None
for g in self.parent_groups:
g.clear_hosts_cache()
def get_hosts(self):
# return the host objects, caching the result
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
def _get_hosts(self):
# return the deduplicated list of all host objects in this group
hosts = [] # hosts is the final list of host objects to return
seen = {} # seen tracks hosts that have already been processed
for kid in self.child_groups:
kid_hosts = kid.get_hosts()
for kk in kid_hosts:
if kk not in seen:
seen[kk] = 1
hosts.append(kk)
for mine in self.hosts:
if mine not in seen:
seen[mine] = 1
hosts.append(mine)
return hosts
def get_variables(self):
# return a copy of the group variables
return self.vars.copy()
def _get_ancestors(self):
# recursively collect all ancestor group objects
results = {}
for g in self.parent_groups:
results[g.name] = g
results.update(g._get_ancestors())
return results
def get_ancestors(self):
# return all ancestor group objects
return self._get_ancestors().values()
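# Usage sketch (an editorial example, not part of the original module; `host`
# stands for any inventory Host object):
#
#   parent = Group(name='all')
#   web = Group(name='webservers')
#   parent.add_child_group(web)   # web.depth becomes 1, 'all' is web's parent
#   web.add_host(host)            # also invalidates the host cache of 'all'
#   parent.get_hosts()            # includes web's hosts, deduplicated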
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_middleware import request_id
import webob
from neutron import auth
from neutron.tests import base
class NeutronKeystoneContextTestCase(base.BaseTestCase):
def setUp(self):
super(NeutronKeystoneContextTestCase, self).setUp()
@webob.dec.wsgify
def fake_app(req):
self.context = req.environ['neutron.context']
return webob.Response()
self.context = None
self.middleware = auth.NeutronKeystoneContext(fake_app)
self.request = webob.Request.blank('/')
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
def test_no_user_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '401 Unauthorized')
def test_with_user_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
self.assertEqual(self.context.user, 'testuserid')
def test_with_tenant_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'test_user_id'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.tenant_id, 'testtenantid')
self.assertEqual(self.context.tenant, 'testtenantid')
def test_roles_no_admin(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5'])
self.assertEqual(self.context.is_admin, False)
def test_roles_with_admin(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,'
'AdMiN')
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5', 'AdMiN'])
self.assertTrue(self.context.is_admin)
def test_with_user_tenant_name(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_PROJECT_NAME'] = 'testtenantname'
self.request.headers['X_USER_NAME'] = 'testusername'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
self.assertEqual(self.context.user_name, 'testusername')
self.assertEqual(self.context.tenant_id, 'testtenantid')
self.assertEqual(self.context.tenant_name, 'testtenantname')
def test_request_id_extracted_from_env(self):
req_id = 'dummy-request-id'
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.environ[request_id.ENV_REQUEST_ID] = req_id
self.request.get_response(self.middleware)
self.assertEqual(req_id, self.context.request_id)
def test_with_auth_token(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.auth_token, 'testauthtoken')
def test_without_auth_token(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
del self.request.headers['X_AUTH_TOKEN']
self.request.get_response(self.middleware)
self.assertIsNone(self.context.auth_token)
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request, HTTPUnauthorized
from swift.common.middleware import container_quotas
class FakeCache(object):
def __init__(self, val):
if 'status' not in val:
val['status'] = 200
self.val = val
def get(self, *args):
return self.val
class FakeApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('200 OK', [])
return []
class FakeMissingApp(object):
def __init__(self):
pass
def __call__(self, env, start_response):
start_response('404 Not Found', [])
return []
def start_response(*args):
pass
class TestContainerQuotas(unittest.TestCase):
def test_split_path_empty_container_path_segment(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a//something/something_else',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': {'key':'value'}})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_not_handled(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'PUT'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_no_quotas(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': FakeCache({}),
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 413)
def test_not_exceed_bytes_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 413)
def test_not_exceed_counts_quota(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '2'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 200)
def test_invalid_quotas(self):
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEquals(res.status_int, 400)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': 'abc'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEquals(res.status_int, 400)
def test_valid_quotas(self):
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEquals(res.status_int, 200)
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_COUNT': '123'})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEquals(res.status_int, 200)
def test_delete_quotas(self):
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_CONTAINER_META_QUOTA_BYTES': None})
res = req.get_response(
container_quotas.ContainerQuotaMiddleware(FakeApp(), {}))
self.assertEquals(res.status_int, 200)
def test_missing_container(self):
app = container_quotas.ContainerQuotaMiddleware(FakeMissingApp(), {})
cache = FakeCache({'bytes': 0, 'meta': {'quota-bytes': '100'}})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
res = req.get_response(app)
self.assertEquals(res.status_int, 404)
def test_auth_fail(self):
app = container_quotas.ContainerQuotaMiddleware(FakeApp(), {})
cache = FakeCache({'object_count': 1, 'meta': {'quota-count': '1'},
'write_acl': None})
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.authorize': lambda *args: HTTPUnauthorized()})
res = req.get_response(app)
self.assertEquals(res.status_int, 401)
if __name__ == '__main__':
unittest.main()
|
from functools import partial
from django.contrib import messages as django_messages
from django.template import loader
from django.utils import safestring
import jinja2
from rest_framework.request import Request
"""
This file was created because AMO wants to have multi-line messages including a
title and some content. Django's messages framework only takes a single
string.
Importing this file should behave exactly like Django's messages framework
except it will take a 3rd argument as message content (the second is the
message title).
"""
class DoubleSafe(safestring.SafeText, jinja2.Markup):
"""Double safe all the way: marks safe for django and jinja2.
Even though we're using jinja2 for most of the template rendering, we may
have places where it's Django deciding whether the data is safe or not. An
example is the messaging framework. If we add a new message that is marked
safe for jinja2 (using a Markup object), it's not persisted that way by
Django, and we thus lose the "safeness" of the message.
This serves to give us the best of both worlds.
"""
def _make_message(title=None, message=None, title_safe=False,
message_safe=False):
context = {
'title': title, 'message': message,
'title_safe': title_safe, 'message_safe': message_safe}
tpl = loader.get_template('message_content.html').render(context)
return DoubleSafe(tpl)
def _is_dupe(msg, request):
"""Returns whether a particular message is already cued for display."""
storage = django_messages.get_messages(request)
# If there are no messages stored, Django doesn't give us a proper storage
# object, so just bail early.
if not storage:
return False
try:
smsg = str(msg)
is_dupe = False
for message in storage:
if str(message) == smsg:
# We can't return from here because we need to tell Django not
# to consume the messages.
is_dupe = True
break
except (UnicodeDecodeError, UnicodeEncodeError):
return False
storage.used = False
return is_dupe
def _file_message(type_, request, title, message=None, extra_tags='',
fail_silently=False, title_safe=False, message_safe=False):
msg = _make_message(title, message, title_safe, message_safe)
# Don't save duplicates.
if _is_dupe(msg, request):
return
if isinstance(request, Request):
# Support for passing of django-rest-framework wrapped request objects
request = request._request
getattr(django_messages, type_)(request, msg, extra_tags, fail_silently)
debug = partial(_file_message, 'debug')
info = partial(_file_message, 'info')
success = partial(_file_message, 'success')
warning = partial(_file_message, 'warning')
error = partial(_file_message, 'error')
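# Usage sketch (an editorial example; the import path is hypothetical):
#
#   from olympia.amo import messages
#   messages.success(request, 'Review saved.',
#                    'Your review is now <b>public</b>.', message_safe=True)
#
# This mirrors django.contrib.messages.success(request, msg) but renders a
# title plus a message body through message_content.html before storing it.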
|
# Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 John Dickinson
# Copyright (c) 2012 Felipe Reyes
# Copyright (c) 2012 Peter Portante
# Copyright (c) 2012 Victor Rodionov
# Copyright (c) 2013-2014 Samuel Merritt
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2013 Dirk Mueller
# Copyright (c) 2013 Donagh McCabe
# Copyright (c) 2013 Fabien Boucher
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TempURL Middleware
Allows the creation of URLs to provide temporary access to objects.
For example, a website may wish to provide a link to download a large
object in Swift, but the Swift account has no public access. The
website can generate a URL that will provide GET access for a limited
time to the resource. When the web browser user clicks on the link,
the browser will download the object directly from Swift, obviating
the need for the website to act as a proxy for the request.
If the user were to share the link with all his friends, or
accidentally post it on a forum, etc. the direct access would be
limited to the expiration time set when the website created the link.
To create such temporary URLs, first an X-Account-Meta-Temp-URL-Key
header must be set on the Swift account. Then, an HMAC-SHA1 (RFC 2104)
signature is generated using the HTTP method to allow (GET, PUT,
DELETE, etc.), the Unix timestamp the access should be allowed until,
the full path to the object, and the key set on the account.
For example, here is code generating the signature for a GET for 60
seconds on /v1/AUTH_account/container/object::
import hmac
from hashlib import sha1
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
key = 'mykey'
hmac_body = '%s\\n%s\\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
Be certain to use the full path, from the /v1/ onward.
Let's say the sig ends up equaling
da39a3ee5e6b4b0d3255bfef95601890afd80709 and expires ends up
1323479485. Then, for example, the website could provide a link to::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485
Any alteration of the resource path or query arguments would result
in 401 Unauthorized. Similarly, a PUT where GET was the allowed method
would 401. HEAD is allowed if GET, PUT, or POST is allowed.
Using this in combination with browser form post translation
middleware could also allow direct-from-browser uploads to specific
locations in Swift.
TempURL supports both account and container level keys. Each allows up to two
keys to be set, allowing key rotation without invalidating all existing
temporary URLs. Account keys are specified by X-Account-Meta-Temp-URL-Key and
X-Account-Meta-Temp-URL-Key-2, while container keys are specified by
X-Container-Meta-Temp-URL-Key and X-Container-Meta-Temp-URL-Key-2.
Signatures are checked against account and container keys, if
present.
With GET TempURLs, a Content-Disposition header will be set on the
response so that browsers will interpret this as a file attachment to
be saved. The filename chosen is based on the object name, but you
can override this with a filename query parameter. Modifying the
above example::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&filename=My+Test+File.pdf
If you do not want the object to be downloaded, you can cause
"Content-Disposition: inline" to be set on the response by adding the "inline"
parameter to the query string, like so::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&inline
"""
__all__ = ['TempURL', 'filter_factory',
'DEFAULT_INCOMING_REMOVE_HEADERS',
'DEFAULT_INCOMING_ALLOW_HEADERS',
'DEFAULT_OUTGOING_REMOVE_HEADERS',
'DEFAULT_OUTGOING_ALLOW_HEADERS']
from os.path import basename
from time import time
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlencode
from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.swob import HeaderKeyDict, HTTPUnauthorized, HTTPBadRequest
from swift.common.utils import split_path, get_valid_utf8_str, \
register_swift_info, get_hmac, streq_const_time, quote
DISALLOWED_INCOMING_HEADERS = 'x-object-manifest'
#: Default headers to remove from incoming requests. Simply a whitespace
#: delimited list of header names and names can optionally end with '*' to
#: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of
#: exceptions to these removals.
DEFAULT_INCOMING_REMOVE_HEADERS = 'x-timestamp'
#: Default headers as exceptions to DEFAULT_INCOMING_REMOVE_HEADERS. Simply a
#: whitespace delimited list of header names and names can optionally end with
#: '*' to indicate a prefix match.
DEFAULT_INCOMING_ALLOW_HEADERS = ''
#: Default headers to remove from outgoing responses. Simply a whitespace
#: delimited list of header names and names can optionally end with '*' to
#: indicate a prefix match. DEFAULT_OUTGOING_ALLOW_HEADERS is a list of
#: exceptions to these removals.
DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*'
#: Default headers as exceptions to DEFAULT_OUTGOING_REMOVE_HEADERS. Simply a
#: whitespace delimited list of header names and names can optionally end with
#: '*' to indicate a prefix match.
DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*'
CONTAINER_SCOPE = 'container'
ACCOUNT_SCOPE = 'account'
def get_tempurl_keys_from_metadata(meta):
"""
Extracts the tempurl keys from metadata.
:param meta: account metadata
:returns: list of keys found (possibly empty if no keys set)
Example:
meta = get_account_info(...)['meta']
keys = get_tempurl_keys_from_metadata(meta)
"""
return [get_valid_utf8_str(value) for key, value in meta.items()
if key.lower() in ('temp-url-key', 'temp-url-key-2')]
def disposition_format(filename):
return '''attachment; filename="%s"; filename*=UTF-8''%s''' % (
quote(filename, safe=' /'), quote(filename))
def authorize_same_account(account_to_match):
def auth_callback_same_account(req):
try:
_ver, acc, _rest = req.split_path(2, 3, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_account
def authorize_same_container(account_to_match, container_to_match):
def auth_callback_same_container(req):
try:
_ver, acc, con, _rest = req.split_path(3, 4, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match and con == container_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_container
class TempURL(object):
"""
WSGI Middleware to grant temporary URLs specific access to Swift
resources. See the overview for more information.
This middleware understands the following configuration settings::
incoming_remove_headers
The headers to remove from incoming requests. Simply a
whitespace delimited list of header names and names can
optionally end with '*' to indicate a prefix match.
incoming_allow_headers is a list of exceptions to these
removals.
Default: x-timestamp
incoming_allow_headers
The headers allowed as exceptions to
incoming_remove_headers. Simply a whitespace delimited
list of header names and names can optionally end with
'*' to indicate a prefix match.
Default: None
outgoing_remove_headers
The headers to remove from outgoing responses. Simply a
whitespace delimited list of header names and names can
optionally end with '*' to indicate a prefix match.
outgoing_allow_headers is a list of exceptions to these
removals.
Default: x-object-meta-*
outgoing_allow_headers
The headers allowed as exceptions to
outgoing_remove_headers. Simply a whitespace delimited
list of header names and names can optionally end with
'*' to indicate a prefix match.
Default: x-object-meta-public-*
methods
A whitespace delimited list of request methods that are
allowed to be used with a temporary URL.
Default: 'GET HEAD PUT POST DELETE'
The proxy logs created for any subrequests made will have swift.source set
to "TU".
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf,
methods=('GET', 'HEAD', 'PUT', 'POST', 'DELETE')):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
#: The methods allowed with Temp URLs.
self.methods = methods
self.disallowed_headers = set(
'HTTP_' + h.upper().replace('-', '_')
for h in DISALLOWED_INCOMING_HEADERS.split())
headers = DEFAULT_INCOMING_REMOVE_HEADERS
if 'incoming_remove_headers' in conf:
headers = conf['incoming_remove_headers']
headers = \
['HTTP_' + h.upper().replace('-', '_') for h in headers.split()]
#: Headers to remove from incoming requests. Uppercase WSGI env style,
#: like `HTTP_X_PRIVATE`.
self.incoming_remove_headers = [h for h in headers if h[-1] != '*']
#: Header with match prefixes to remove from incoming requests.
#: Uppercase WSGI env style, like `HTTP_X_SENSITIVE_*`.
self.incoming_remove_headers_startswith = \
[h[:-1] for h in headers if h[-1] == '*']
headers = DEFAULT_INCOMING_ALLOW_HEADERS
if 'incoming_allow_headers' in conf:
headers = conf['incoming_allow_headers']
headers = \
['HTTP_' + h.upper().replace('-', '_') for h in headers.split()]
#: Headers to allow in incoming requests. Uppercase WSGI env style,
#: like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY`.
self.incoming_allow_headers = [h for h in headers if h[-1] != '*']
#: Header with match prefixes to allow in incoming requests. Uppercase
#: WSGI env style, like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY_*`.
self.incoming_allow_headers_startswith = \
[h[:-1] for h in headers if h[-1] == '*']
headers = DEFAULT_OUTGOING_REMOVE_HEADERS
if 'outgoing_remove_headers' in conf:
headers = conf['outgoing_remove_headers']
headers = [h.title() for h in headers.split()]
#: Headers to remove from outgoing responses. Lowercase, like
#: `x-account-meta-temp-url-key`.
self.outgoing_remove_headers = [h for h in headers if h[-1] != '*']
#: Header with match prefixes to remove from outgoing responses.
#: Lowercase, like `x-account-meta-private-*`.
self.outgoing_remove_headers_startswith = \
[h[:-1] for h in headers if h[-1] == '*']
headers = DEFAULT_OUTGOING_ALLOW_HEADERS
if 'outgoing_allow_headers' in conf:
headers = conf['outgoing_allow_headers']
headers = [h.title() for h in headers.split()]
#: Headers to allow in outgoing responses. Lowercase, like
#: `x-matches-remove-prefix-but-okay`.
self.outgoing_allow_headers = [h for h in headers if h[-1] != '*']
#: Header with match prefixes to allow in outgoing responses.
#: Lowercase, like `x-matches-remove-prefix-but-okay-*`.
self.outgoing_allow_headers_startswith = \
[h[:-1] for h in headers if h[-1] == '*']
#: HTTP user agent to use for subrequests.
self.agent = '%(orig)s TempURL'
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'OPTIONS':
return self.app(env, start_response)
info = self._get_temp_url_info(env)
temp_url_sig, temp_url_expires, filename, inline_disposition = info
if temp_url_sig is None and temp_url_expires is None:
return self.app(env, start_response)
if not temp_url_sig or not temp_url_expires:
return self._invalid(env, start_response)
account, container = self._get_account_and_container(env)
if not account:
return self._invalid(env, start_response)
keys = self._get_keys(env)
if not keys:
return self._invalid(env, start_response)
if env['REQUEST_METHOD'] == 'HEAD':
hmac_vals = (
self._get_hmacs(env, temp_url_expires, keys) +
self._get_hmacs(env, temp_url_expires, keys,
request_method='GET') +
self._get_hmacs(env, temp_url_expires, keys,
request_method='POST') +
self._get_hmacs(env, temp_url_expires, keys,
request_method='PUT'))
else:
hmac_vals = self._get_hmacs(env, temp_url_expires, keys)
is_valid_hmac = False
hmac_scope = None
for hmac, scope in hmac_vals:
# While it's true that we short-circuit, this doesn't affect the
# timing-attack resistance since the only way this will
# short-circuit is when a valid signature is passed in.
if streq_const_time(temp_url_sig, hmac):
is_valid_hmac = True
hmac_scope = scope
break
if not is_valid_hmac:
return self._invalid(env, start_response)
# disallowed headers prevent accidentally allowing upload of a pointer
# to data that the PUT tempurl would not otherwise allow access for.
# It should be safe to provide a GET tempurl for data that an
# untrusted client just uploaded with a PUT tempurl.
resp = self._clean_disallowed_headers(env, start_response)
if resp:
return resp
self._clean_incoming_headers(env)
if hmac_scope == ACCOUNT_SCOPE:
env['swift.authorize'] = authorize_same_account(account)
else:
env['swift.authorize'] = authorize_same_container(account,
container)
env['swift.authorize_override'] = True
env['REMOTE_USER'] = '.wsgi.tempurl'
qs = {'temp_url_sig': temp_url_sig,
'temp_url_expires': temp_url_expires}
if filename:
qs['filename'] = filename
env['QUERY_STRING'] = urlencode(qs)
def _start_response(status, headers, exc_info=None):
headers = self._clean_outgoing_headers(headers)
if env['REQUEST_METHOD'] == 'GET' and status[0] == '2':
# figure out the right value for content-disposition
# 1) use the value from the query string
# 2) use the value from the object metadata
# 3) use the object name (default)
out_headers = []
existing_disposition = None
for h, v in headers:
if h.lower() != 'content-disposition':
out_headers.append((h, v))
else:
existing_disposition = v
if inline_disposition:
disposition_value = 'inline'
elif filename:
disposition_value = disposition_format(filename)
elif existing_disposition:
disposition_value = existing_disposition
else:
name = basename(env['PATH_INFO'].rstrip('/'))
disposition_value = disposition_format(name)
# this is probably just paranoia, I couldn't actually get a
# newline into existing_disposition
value = disposition_value.replace('\n', '%0A')
out_headers.append(('Content-Disposition', value))
headers = out_headers
return start_response(status, headers, exc_info)
return self.app(env, _start_response)
def _get_account_and_container(self, env):
"""
Returns just the account and container for the request, if it's an
object request and one of the configured methods; otherwise,
(None, None) is returned.
:param env: The WSGI environment for the request.
:returns: (Account str, container str) or (None, None).
"""
if env['REQUEST_METHOD'] in self.methods:
try:
ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True)
except ValueError:
return (None, None)
if ver == 'v1' and obj.strip('/'):
return (acc, cont)
return (None, None)
def _get_temp_url_info(self, env):
"""
Returns the provided temporary URL parameters (sig, expires),
if given and syntactically valid. Either sig or expires could
be None if not provided. If provided, expires is also
converted to an int if possible or 0 if not, and checked for
expiration (returns 0 if expired).
:param env: The WSGI environment for the request.
:returns: (sig, expires, filename, inline) as described above.
"""
temp_url_sig = temp_url_expires = filename = inline = None
qs = parse_qs(env.get('QUERY_STRING', ''), keep_blank_values=True)
if 'temp_url_sig' in qs:
temp_url_sig = qs['temp_url_sig'][0]
if 'temp_url_expires' in qs:
try:
temp_url_expires = int(qs['temp_url_expires'][0])
except ValueError:
temp_url_expires = 0
if temp_url_expires < time():
temp_url_expires = 0
if 'filename' in qs:
filename = qs['filename'][0]
if 'inline' in qs:
inline = True
return temp_url_sig, temp_url_expires, filename, inline
def _get_keys(self, env):
"""
Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values
for the account or container, or an empty list if none are set. Each
value comes as a 2-tuple (key, scope), where scope is either
CONTAINER_SCOPE or ACCOUNT_SCOPE.
Returns 0-4 elements depending on how many keys are set in the
account's or container's metadata.
:param env: The WSGI environment for the request.
:returns: [
(X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set,
(X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE) if set,
(X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set,
(X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE) if set,
]
"""
account_info = get_account_info(env, self.app, swift_source='TU')
account_keys = get_tempurl_keys_from_metadata(account_info['meta'])
container_info = get_container_info(env, self.app, swift_source='TU')
container_keys = get_tempurl_keys_from_metadata(
container_info.get('meta', []))
return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] +
[(ck, CONTAINER_SCOPE) for ck in container_keys])
def _get_hmacs(self, env, expires, scoped_keys, request_method=None):
"""
:param env: The WSGI environment for the request.
:param expires: Unix timestamp as an int for when the URL
expires.
:param scoped_keys: (key, scope) tuples like _get_keys() returns
:param request_method: Optional override of the request in
the WSGI env. For example, if a HEAD
does not match, you may wish to
override with GET to still allow the
HEAD.
:returns: a list of (hmac, scope) 2-tuples
"""
if not request_method:
request_method = env['REQUEST_METHOD']
return [
(get_hmac(request_method, env['PATH_INFO'], expires, key), scope)
for (key, scope) in scoped_keys]
def _invalid(self, env, start_response):
"""
Performs the necessary steps to indicate a WSGI 401
Unauthorized response to the request.
:param env: The WSGI environment for the request.
:param start_response: The WSGI start_response hook.
:returns: 401 response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'HEAD':
body = None
else:
body = '401 Unauthorized: Temp URL invalid\n'
return HTTPUnauthorized(body=body)(env, start_response)
def _clean_disallowed_headers(self, env, start_response):
"""
Validate the absence of disallowed headers for "unsafe" operations.
:returns: None for safe operations, or a swob.HTTPBadRequest response if
the request includes disallowed headers.
"""
if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'):
return
for h in env:
if h in self.disallowed_headers:
return HTTPBadRequest(
body='The header %r is not allowed in this tempurl' %
h[len('HTTP_'):].title().replace('_', '-'))(
env, start_response)
def _clean_incoming_headers(self, env):
"""
Removes any headers from the WSGI environment as per the
middleware configuration for incoming requests.
:param env: The WSGI environment for the request.
"""
for h in list(env.keys()):
if h in self.incoming_allow_headers:
continue
for p in self.incoming_allow_headers_startswith:
if h.startswith(p):
break
else:
if h in self.incoming_remove_headers:
del env[h]
continue
for p in self.incoming_remove_headers_startswith:
if h.startswith(p):
del env[h]
break
def _clean_outgoing_headers(self, headers):
"""
Removes any headers as per the middleware configuration for
outgoing responses.
:param headers: A WSGI start_response style list of headers,
[('header1', 'value'), ('header2', 'value'),
...]
:returns: The same headers list, but with some headers
removed as per the middleware configuration for
outgoing responses.
"""
headers = HeaderKeyDict(headers)
for h in list(headers.keys()):
if h in self.outgoing_allow_headers:
continue
for p in self.outgoing_allow_headers_startswith:
if h.startswith(p):
break
else:
if h in self.outgoing_remove_headers:
del headers[h]
continue
for p in self.outgoing_remove_headers_startswith:
if h.startswith(p):
del headers[h]
break
return headers.items()
def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
methods = conf.get('methods', 'GET HEAD PUT POST DELETE').split()
register_swift_info('tempurl', methods=methods)
return lambda app: TempURL(app, conf, methods=methods)
|
import cvxpy
GAMMA = 100
solver_map = {
'cvxopt': cvxpy.CVXOPT,
'gurobi': cvxpy.GUROBI
}
'''
- *r_list* is a list of tuples (weight, body, head)
- *body* and *head* are lists of tuples (is_constant, value/id, is_negated)
- *is_constant* is a flag, True if the truth value is known, False otherwise
- *value/id* equals the truth value if it is known,
and is the id of the corresponding variable otherwise
- *is_negated* is a flag, True if the atom is negated in the rule,
False otherwise
'''
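# Illustrative encoding (an editorial example; the variable ids are made up):
# a rule with weight 0.8, a body containing one known-true atom and one
# unknown atom with variable id 3, and a head atom with variable id 7, none
# of them negated, would be written as
#   (0.8, [(True, 1.0, False), (False, 3, False)], [(False, 7, False)])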
def map_inference(rules, hard_rules, solver='cvxopt'):
vid_dict = dict()
var_ids = set()
all_rules = rules + hard_rules
for _, body, head in all_rules:
if (len(body)>0):
var_ids |= set([b[1] for b in body if not b[0]])
if (len(head)>0):
var_ids |= set([h[1] for h in head if not h[0]])
f, bounds = psl_objective(var_ids, vid_dict, rules)
hard_constraints = []
if len(hard_rules) > 0:
hard_constraints = psl_hard_constraints(vid_dict, hard_rules)
constraints = bounds + hard_constraints
objective = cvxpy.Minimize(f)
problem = cvxpy.Problem(objective, constraints)
problem.solve(solver=solver_map[solver])
results = dict()
for vid in var_ids:
results[vid] = vid_dict[vid].value
return results
def fair_map_inference(rules, hard_rules, counts, delta, fairness_measure, solver='cvxopt'):
assert(fairness_measure in ('RD', 'RR', 'RC'))
vid_dict = dict()
var_ids = set()
all_rules = rules + hard_rules
for _, body, head in all_rules:
var_ids |= set([b[1] for b in body if not b[0]])
var_ids |= set([h[1] for h in head if not h[0]])
f, bounds = psl_objective(var_ids, vid_dict, rules)
hard_constraints = []
if len(hard_rules) > 0:
hard_constraints = psl_hard_constraints(vid_dict, hard_rules)
fairness_constraints = psl_fairness_constraints(vid_dict, counts, delta, fairness_measure)
constraints= bounds + hard_constraints + fairness_constraints
objective = cvxpy.Minimize(f)
problem = cvxpy.Problem(objective, constraints)
problem.solve(solver=solver_map[solver])
results = dict()
for vid in var_ids:
results[vid] = vid_dict[vid].value
return results
def calculate(counts, vid_dict):
n1 = 0.0
n2 = 0.0
a = 0.0
c = 0.0
for f1, f2, d in counts:
f1f2 = max(f1+f2-1, 0)
nf1f2 = max(-f1+f2, 0)
n1 += f1f2
n2 += nf1f2
if d[0]:
a += max(f1f2 - d[1], 0)
c += max(nf1f2 - d[1], 0)
else:
if f1f2 == 1:
a += 1 - vid_dict[d[1]]
if nf1f2 == 1:
c += 1 - vid_dict[d[1]]
return a,c,n1,n2
def psl_fairness_constraints(vid_dict, counts, delta, fairness_measure):
if fairness_measure=='RD':
return risk_difference_constraints(counts,vid_dict,delta)
elif fairness_measure=='RR':
return risk_ratio_constraints(counts,vid_dict,delta)
elif fairness_measure=='RC':
return risk_chance_constraints(counts,vid_dict,delta)
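# Editorial note: with a, c, n1, n2 as computed by calculate() above, the
# helpers below express the fairness bounds as linear constraints by
# multiplying through by n1*n2 (assumed positive). For example, risk
# difference |a/n1 - c/n2| <= delta becomes the two inequalities in
# risk_difference_constraints, and risk ratio
# 1 - delta <= (a*n2)/(c*n1) <= 1 + delta becomes the two inequalities in
# risk_ratio_constraints.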
def risk_difference_constraints(counts,vid_dict,delta):
a,c,n1,n2 = calculate(counts,vid_dict)
constraints = []
constraints.append((n2*a - n1*c - n1*n2*delta) <= 0)
constraints.append((n2*a - n1*c + n1*n2*delta) >= 0)
return constraints
def risk_ratio_constraints(counts,vid_dict,delta):
a,c,n1,n2 = calculate(counts,vid_dict)
constraints = []
constraints.append((n2*a - (1+delta)*n1*c) <= 0)
constraints.append((n2*a - (1-delta)*n1*c) >= 0)
return constraints
def risk_chance_constraints(counts,vid_dict,delta):
a,c,n1,n2 = calculate(counts,vid_dict)
constraints = []
constraints.append((-n2*a + (1+delta)*n1*c - delta*n1*n2) <= 0)
constraints.append((-n2*a + (1-delta)*n1*c + delta*n1*n2) >= 0)
return constraints
def psl_objective(var_ids, vid_dict, r_list):
constraints = []
for vid in var_ids:
var = cvxpy.Variable()
vid_dict[vid] = var
constraints += [0 <= var, var <= 1]
f = 0
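# Editorial note: expr below starts at 1 and subtracts, for every literal,
# how satisfied it is (1 - y for a positive body atom, y for a negated one,
# and symmetrically for head atoms), so weight * cvxpy.pos(expr) is the
# weighted hinge-loss distance to satisfaction of the rule.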
for weight, body, head in r_list:
expr = 1
for b in body:
if b[0]:
y = b[1]
else:
y = vid_dict[b[1]]
if b[2]:
expr -= y
else:
expr -= (1-y)
for h in head:
if h[0]:
y = h[1]
else:
y = vid_dict[h[1]]
if h[2]:
expr -= (1-y)
else:
expr -= y
f += weight * cvxpy.pos(expr)
return f, constraints
def psl_hard_constraints(vid_dict, r_list):
constraints = []
for _, body, head in r_list:
expr = 1
for b in body:
if b[0]:
y = b[1]
else:
y = vid_dict[b[1]]
if b[2]:
expr -= y
else:
expr -= (1-y)
for h in head:
if h[0]:
y = h[1]
else:
y = vid_dict[h[1]]
if h[2]:
expr -= (1-y)
else:
expr -= y
constraints.append(expr <= 0)
return constraints
|
from model.contact import Contact
from random import randrange
def test_modify_some_contact(app, check_ui):
if app.contacts.count() == 0:
app.contacts.create(Contact(firstname="test", middlename="test", lastname="gkhgkg", nick="gkgkgk", title="gkgk",
company="gkgkjgkgk", address="gkgkjgkgk", home_tel="777", mob_tel="888",
work_tel="999", fax="777", email="[email protected]", email2="[email protected]",
homepage="ppp.com", birthday="1989"))
old_contacts = app.contacts.get_contact_list()
index = randrange(len(old_contacts))
contact = Contact(firstname="newf", middlename="newm", lastname="newl", nick="new", title="newt", company="newc", address="new", home_tel="111", mob_tel="222",
work_tel="333", fax="444", email="[email protected]", email2="[email protected]", homepage="newppp.com", birthday="1900")
contact.id = old_contacts[index].id
app.contacts.modify_contact_by_index(index,contact)
new_contacts = app.contacts.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
#assert old_contacts == new_contacts
#if check_ui:
#assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
#key=Contact.id_or_max)
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
from nova.scheduler import client
from nova.scheduler.client import report
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import utils as scheduler_utils
from nova.scheduler import weights
from nova import test # noqa
from nova.tests.unit.scheduler import test_scheduler
fake_numa_limit = objects.NUMATopologyLimits(cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0)
fake_limit = {"memory_mb": 1024, "disk_gb": 100, "vcpus": 2,
"numa_topology": fake_numa_limit}
fake_limit_obj = objects.SchedulerLimits.from_dict(fake_limit)
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.compute_node},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc_version = "1.23"
json_alloc = jsonutils.dumps(fake_alloc)
fake_selection = objects.Selection(service_host="fake_host",
nodename="fake_node", compute_node_uuid=uuids.compute_node,
cell_uuid=uuids.cell, limits=fake_limit_obj,
allocation_request=json_alloc,
allocation_request_version=fake_alloc_version)
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
@mock.patch('nova.scheduler.client.SchedulerClient')
def setUp(self, mock_client):
pc_client = mock.Mock(spec=report.SchedulerReportClient)
sched_client = mock.Mock(spec=client.SchedulerClient)
sched_client.reportclient = pc_client
mock_client.return_value = sched_client
self.placement_client = pc_client
super(FilterSchedulerTestCase, self).setUp()
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_placement_bad_comms(self, mock_get_hosts,
mock_get_all_states, mock_claim):
"""If there was a problem communicating with the Placement service,
alloc_reqs_by_rp_uuid will be None and we need to avoid trying to claim
in the Placement API.
"""
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=None, instance_uuid=uuids.instance)
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
limits={})
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
visited_instances = set([])
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
return all_host_states
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.instance]
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
None, mock.sentinel.provider_summaries)
expected_hosts = [[objects.Selection.from_host_state(host_state)]]
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
self.assertEqual(len(selected_hosts), 1)
self.assertEqual(expected_hosts, selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
# And ensure we never called claim_resources()
self.assertFalse(mock_claim.called)
# Make sure that the RequestSpec.instance_uuid is not dirty.
self.assertEqual(sorted(instance_uuids), sorted(visited_instances))
self.assertEqual(0, len(spec_obj.obj_what_changed()),
spec_obj.obj_what_changed())
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_old_conductor(self, mock_get_hosts,
mock_get_all_states, mock_claim):
"""Old conductor can call scheduler without the instance_uuids
parameter. When this happens, we need to ensure we do not attempt to
claim resources in the placement API since obviously we need instance
UUIDs to perform those claims.
"""
group = objects.InstanceGroup(hosts=[])
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=group)
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
limits={}, cell_uuid=uuids.cell, instances={})
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj,
instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
self.assertEqual(len(selected_hosts), 1)
expected_host = objects.Selection.from_host_state(host_state)
self.assertEqual([[expected_host]], selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
# And ensure we never called claim_resources()
self.assertFalse(mock_claim.called)
# And that the host is added to the server group but there are no
# instances tracked in the host_state.
self.assertIn(host_state.host, group.hosts)
self.assertEqual(0, len(host_state.instances))
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def _test_schedule_successful_claim(self, mock_get_hosts,
mock_get_all_states, mock_claim, num_instances=1):
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
cell_uuid=uuids.cell1, limits={})
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance]
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)
sel_obj = objects.Selection.from_host_state(host_state,
allocation_request=fake_alloc)
expected_selection = [[sel_obj]]
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called()
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
allocation_request_version=None)
self.assertEqual(len(selected_hosts), 1)
self.assertEqual(expected_selection, selected_hosts)
# Ensure that we have consumed the resources on the chosen host states
host_state.consume_from_request.assert_called_once_with(spec_obj)
def test_schedule_successful_claim(self):
self._test_schedule_successful_claim()
def test_schedule_old_reqspec_and_move_operation(self):
"""This test is for verifying that in case of a move operation with an
original RequestSpec created for 3 concurrent instances, we only verify
the instance that is moved.
"""
self._test_schedule_successful_claim(num_instances=3)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_cleanup_allocations')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_unsuccessful_claim(self, mock_get_hosts,
mock_get_all_states, mock_claim, mock_cleanup):
"""Tests that we return an empty list if we are unable to successfully
claim resources for the instance
"""
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = False
instance_uuids = [uuids.instance]
alloc_reqs_by_rp_uuid = {
uuids.cn1: [{"allocations": mock.sentinel.alloc_req}],
}
ctx = mock.Mock()
fake_version = "1.99"
self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx,
spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries,
allocation_request_version=fake_version)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
allocation_request_version=fake_version)
mock_cleanup.assert_not_called()
# Ensure that we have consumed the resources on the chosen host states
self.assertFalse(host_state.consume_from_request.called)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_cleanup_allocations')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_not_all_instance_clean_claimed(self, mock_get_hosts,
mock_get_all_states, mock_claim, mock_cleanup):
"""Tests that we clean up previously-allocated instances if not all
instances could be scheduled
"""
spec_obj = objects.RequestSpec(
num_instances=2,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
cell_uuid=uuids.cell1, limits={}, updated='fake')
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.side_effect = [
all_host_states, # first instance: return all the hosts (only one)
[], # second: act as if no more hosts that meet criteria
all_host_states, # the final call when creating alternates
]
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
fake_alloc = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn1: [fake_alloc]}
ctx = mock.Mock()
self.assertRaises(exception.NoValidHost, self.driver._schedule, ctx,
spec_obj, instance_uuids, alloc_reqs_by_rp_uuid,
mock.sentinel.provider_summaries)
# Ensure we cleaned up the first successfully-claimed instance
mock_cleanup.assert_called_once_with(ctx, [uuids.instance1])
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_selection_alloc_requests_for_alts(self, mock_get_hosts,
mock_get_all_states, mock_claim):
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
cell_uuid=uuids.cell, limits={})
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
fake_alloc0 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn0},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc1 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0],
uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries,
return_alternates=True)
sel0 = objects.Selection.from_host_state(host_state0,
allocation_request=fake_alloc0)
sel1 = objects.Selection.from_host_state(host_state1,
allocation_request=fake_alloc1)
sel2 = objects.Selection.from_host_state(host_state2,
allocation_request=fake_alloc2)
expected_selection = [[sel0, sel1, sel2]]
self.assertEqual(expected_selection, selected_hosts)
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_selection_alloc_requests_no_alts(self, mock_get_hosts,
mock_get_all_states, mock_claim):
spec_obj = objects.RequestSpec(
num_instances=1,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
cell_uuid=uuids.cell, limits={})
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
cell_uuid=uuids.cell, limits={})
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
cell_uuid=uuids.cell, limits={})
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
fake_alloc0 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn0},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc1 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn1},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
fake_alloc2 = {"allocations": [
{"resource_provider": {"uuid": uuids.cn2},
"resources": {"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}
}]}
alloc_reqs_by_rp_uuid = {uuids.cn0: [fake_alloc0],
uuids.cn1: [fake_alloc1], uuids.cn2: [fake_alloc2]}
ctx = mock.Mock()
selected_hosts = self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries,
return_alternates=False)
sel0 = objects.Selection.from_host_state(host_state0,
allocation_request=fake_alloc0)
expected_selection = [[sel0]]
self.assertEqual(expected_selection, selected_hosts)
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_instance_group(self, mock_get_hosts,
mock_get_all_states, mock_claim):
"""Test that since the request spec object contains an instance group
object, that upon choosing a host in the primary schedule loop,
that we update the request spec's instance group information
"""
num_instances = 2
ig = objects.InstanceGroup(hosts=[])
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_group=ig, instance_uuid=uuids.instance0)
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
nodename="node1", limits={}, uuid=uuids.cn1,
cell_uuid=uuids.cell1, instances={})
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
nodename="node2", limits={}, uuid=uuids.cn2,
cell_uuid=uuids.cell2, instances={})
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
alloc_reqs_by_rp_uuid = {
uuids.cn1: [{"allocations": "fake_cn1_alloc"}],
uuids.cn2: [{"allocations": "fake_cn2_alloc"}],
}
        # Simulate a different host being returned first by
        # _get_sorted_hosts() in each of the two scheduling iterations
        # (one per instance in num_instances).
visited_instances = set([])
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
return [hs1, hs2]
return [hs2, hs1]
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
]
ctx = mock.Mock()
self.driver._schedule(ctx, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)
# Check that we called claim_resources() for both the first and second
# host state
claim_calls = [
mock.call(ctx.elevated.return_value, self.placement_client,
spec_obj, uuids.instance0,
alloc_reqs_by_rp_uuid[uuids.cn2][0],
allocation_request_version=None),
mock.call(ctx.elevated.return_value, self.placement_client,
spec_obj, uuids.instance1,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
allocation_request_version=None),
]
mock_claim.assert_has_calls(claim_calls)
# Check that _get_sorted_hosts() is called twice and that the
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
mock.call(spec_obj, all_host_states, 0),
mock.call(spec_obj, [hs2, hs1], 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
# save in the instance group object
self.assertEqual(['host2', 'host1'], ig.hosts)
self.assertEqual({}, ig.obj_get_changes())
# Assert that we updated HostState.instances for each host.
self.assertIn(uuids.instance0, hs2.instances)
self.assertIn(uuids.instance1, hs1.instances)
# Make sure that the RequestSpec.instance_uuid is not dirty.
self.assertEqual(sorted(instance_uuids), sorted(visited_instances))
self.assertEqual(0, len(spec_obj.obj_what_changed()),
spec_obj.obj_what_changed())
@mock.patch('random.choice', side_effect=lambda x: x[1])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts(self, mock_filt, mock_weighed, mock_rand):
"""Tests the call that returns a sorted list of hosts by calling the
host manager's filtering and weighing routines
"""
self.flags(host_subset_size=2, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We override random.choice() to pick the **second** element of the
# returned weighed hosts list, which is the host state #2. This tests
# the code path that combines the randomly-chosen host with the
# remaining list of weighed host state objects
self.assertEqual([hs2, hs1], results)
@mock.patch('random.choice', side_effect=lambda x: x[0])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_subset_less_than_num_weighed(self, mock_filt,
mock_weighed, mock_rand):
"""Tests that when we have >1 weighed hosts but a host subset size of
1, that we always pick the first host in the weighed host
"""
self.flags(host_subset_size=1, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We should be randomly selecting only from a list of one host state
mock_rand.assert_called_once_with([hs1])
self.assertEqual([hs1, hs2], results)
@mock.patch('random.choice', side_effect=lambda x: x[0])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_subset_greater_than_num_weighed(self, mock_filt,
mock_weighed, mock_rand):
"""Hosts should still be chosen if host subset size is larger than
number of weighed hosts.
"""
self.flags(host_subset_size=20, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We overrode random.choice() to return the first element in the list,
# so even though we had a host_subset_size greater than the number of
# weighed hosts (2), we just random.choice() on the entire set of
# weighed hosts and thus return [hs1, hs2]
self.assertEqual([hs1, hs2], results)
@mock.patch('random.shuffle', side_effect=lambda x: x.reverse())
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_shuffle_top_equal(self, mock_filt, mock_weighed,
mock_shuffle):
"""Tests that top best weighed hosts are shuffled when enabled.
"""
self.flags(host_subset_size=1, group='filter_scheduler')
self.flags(shuffle_best_same_weighed_hosts=True,
group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1')
hs2 = mock.Mock(spec=host_manager.HostState, host='host2')
hs3 = mock.Mock(spec=host_manager.HostState, host='host3')
hs4 = mock.Mock(spec=host_manager.HostState, host='host4')
all_host_states = [hs1, hs2, hs3, hs4]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0),
weights.WeighedHost(hs2, 1.0),
weights.WeighedHost(hs3, 0.5),
weights.WeighedHost(hs4, 0.5),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We override random.shuffle() to reverse the list, thus the
# head of the list should become [host#2, host#1]
# (as the host_subset_size is 1) and the tail should stay the same.
self.assertEqual([hs2, hs1, hs3, hs4], results)
def test_cleanup_allocations(self):
instance_uuids = []
# Check we don't do anything if there's no instance UUIDs to cleanup
# allocations for
pc = self.placement_client
self.driver._cleanup_allocations(self.context, instance_uuids)
self.assertFalse(pc.delete_allocation_for_instance.called)
instance_uuids = [uuids.instance1, uuids.instance2]
self.driver._cleanup_allocations(self.context, instance_uuids)
exp_calls = [mock.call(self.context, uuids.instance1),
mock.call(self.context, uuids.instance2)]
pc.delete_allocation_for_instance.assert_has_calls(exp_calls)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
node = "fakenode"
scheduler_utils._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
selection = objects.Selection(service_host="host", nodename="node",
cell_uuid=uuids.cell)
scheduler_utils.populate_filter_properties(filter_properties,
selection)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
def test_select_destinations_match_num_instances(self, mock_schedule):
"""Tests that the select_destinations() method returns the list of
hosts from the _schedule() method when the number of returned hosts
equals the number of instance UUIDs passed in.
"""
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
num_instances=1,
image=None,
numa_topology=None,
pci_requests=None,
instance_uuid=uuids.instance_id)
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
def test_select_destinations_for_move_ops(self, mock_schedule):
"""Tests that the select_destinations() method verifies the number of
hosts returned from the _schedule() method against the number of
instance UUIDs passed as a parameter and not against the RequestSpec
num_instances field since the latter could be wrong in case of a move
operation.
"""
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
num_instances=2,
image=None,
numa_topology=None,
pci_requests=None,
instance_uuid=uuids.instance_id)
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.utils.claim_resources', return_value=True)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_fewer_num_instances(self, mock_get_hosts,
mock_get_all_states, mock_claim):
"""Tests that the _schedule() method properly handles
resetting host state objects and raising NoValidHost when there are not
enough hosts available.
"""
spec_obj = objects.RequestSpec(
num_instances=2,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
limits={}, updated="Not None")
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.side_effect = [all_host_states, []]
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
self.assertRaises(exception.NoValidHost, self.driver._schedule,
self.context, spec_obj, instance_uuids, fake_allocs_by_rp,
mock.sentinel.p_sums)
self.assertIsNone(host_state.updated)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def _test_alternates_returned(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume, num_instances=2, num_alternates=2):
all_host_states = []
alloc_reqs = {}
for num in range(10):
host_name = "host%s" % num
hs = host_manager.HostState(host_name, "node%s" % num,
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
instance_uuids = [getattr(uuids, "inst%s" % num)
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
self.assertEqual(num_instances, len(dests))
        # Filtering and weighing hosts should be called num_instances + 1
        # times, unless no alternates are requested or there is only a single
        # instance, in which case it is called num_instances times.
self.assertEqual(num_instances + 1
if num_alternates > 0 and num_instances > 1
else num_instances,
mock_sorted.call_count,
'Unexpected number of calls to filter hosts for %s '
'instances.' % num_instances)
selected_hosts = [dest[0] for dest in dests]
for dest in dests:
self.assertEqual(total_returned, len(dest))
            # Verify that there are no duplicates within a destination
self.assertEqual(len(dest), len(set(dest)))
# Verify that none of the selected hosts appear in the alternates.
for alt in dest[1:]:
self.assertNotIn(alt, selected_hosts)
def test_alternates_returned(self):
self._test_alternates_returned(num_instances=1, num_alternates=1)
self._test_alternates_returned(num_instances=3, num_alternates=0)
self._test_alternates_returned(num_instances=1, num_alternates=4)
self._test_alternates_returned(num_instances=2, num_alternates=3)
self._test_alternates_returned(num_instances=8, num_alternates=8)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def test_alternates_same_cell(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume):
"""Tests getting alternates plus claims where the hosts are spread
across two cells.
"""
all_host_states = []
alloc_reqs = {}
for num in range(10):
host_name = "host%s" % num
cell_uuid = uuids.cell1 if num % 2 else uuids.cell2
hs = host_manager.HostState(host_name, "node%s" % num,
cell_uuid)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
mock_sorted.side_effect = [all_host_states,
list(reversed(all_host_states)),
all_host_states]
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
instance_uuids = [uuids.inst1, uuids.inst2]
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
# There should be max_attempts hosts per instance (1 selected, 2 alts)
self.assertEqual(total_returned, len(dests[0]))
self.assertEqual(total_returned, len(dests[1]))
# Verify that the two selected hosts are not in the same cell.
self.assertNotEqual(dests[0][0].cell_uuid, dests[1][0].cell_uuid)
for dest in dests:
selected_host = dest[0]
selected_cell_uuid = selected_host.cell_uuid
for alternate in dest[1:]:
self.assertEqual(alternate.cell_uuid, selected_cell_uuid)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def _test_not_enough_alternates(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume, num_hosts, max_attempts):
all_host_states = []
alloc_reqs = {}
for num in range(num_hosts):
host_name = "host%s" % num
hs = host_manager.HostState(host_name, "node%s" % num,
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
mock_claim.return_value = True
        # max_attempts may be larger or smaller than the number of available
        # hosts, depending on the parameters passed by the callers below.
self.flags(max_attempts=max_attempts, group="scheduler")
instance_uuids = [uuids.inst1, uuids.inst2]
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
self.assertEqual(num_instances, len(dests))
selected_hosts = [dest[0] for dest in dests]
        # The number returned for each destination should be the lesser of the
        # number of available hosts and the max_attempts setting.
expected_number = min(num_hosts, max_attempts)
for dest in dests:
self.assertEqual(expected_number, len(dest))
            # Verify that there are no duplicates within a destination
self.assertEqual(len(dest), len(set(dest)))
# Verify that none of the selected hosts appear in the alternates.
for alt in dest[1:]:
self.assertNotIn(alt, selected_hosts)
def test_not_enough_alternates(self):
self._test_not_enough_alternates(num_hosts=100, max_attempts=5)
self._test_not_enough_alternates(num_hosts=5, max_attempts=5)
self._test_not_enough_alternates(num_hosts=3, max_attempts=5)
self._test_not_enough_alternates(num_hosts=20, max_attempts=5)
@mock.patch('nova.compute.utils.notify_about_scheduler_action')
@mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
def test_select_destinations_notifications(self, mock_schedule,
mock_notify):
mock_schedule.return_value = ([[mock.Mock()]], [[mock.Mock()]])
with mock.patch.object(self.driver.notifier, 'info') as mock_info:
flavor = objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1,
disabled=False,
is_public=True,
name="small_flavor")
expected = {'num_instances': 1,
'instance_properties': {
'uuid': uuids.instance,
'ephemeral_gb': 0,
'memory_mb': 512,
'vcpus': 1,
'root_gb': 512},
'instance_type': flavor,
'image': {}}
spec_obj = objects.RequestSpec(num_instances=1,
flavor=flavor,
instance_uuid=uuids.instance)
self.driver.select_destinations(self.context, spec_obj,
[uuids.instance], {}, None)
expected = [
mock.call(self.context, 'scheduler.select_destinations.start',
dict(request_spec=expected)),
mock.call(self.context, 'scheduler.select_destinations.end',
dict(request_spec=expected))]
self.assertEqual(expected, mock_info.call_args_list)
mock_notify.assert_has_calls([
mock.call(context=self.context, request_spec=spec_obj,
action='select_destinations', phase='start'),
mock.call(context=self.context, request_spec=spec_obj,
action='select_destinations', phase='end')])
def test_get_all_host_states_provider_summaries_is_none(self):
"""Tests that HostManager.get_host_states_by_uuids is called with
compute_uuids being None when the incoming provider_summaries is None.
"""
with mock.patch.object(self.driver.host_manager,
'get_host_states_by_uuids') as get_host_states:
self.driver._get_all_host_states(
mock.sentinel.ctxt, mock.sentinel.spec_obj, None)
# Make sure get_host_states_by_uuids was called with
# compute_uuids being None.
get_host_states.assert_called_once_with(
mock.sentinel.ctxt, None, mock.sentinel.spec_obj)
def test_get_all_host_states_provider_summaries_is_empty(self):
"""Tests that HostManager.get_host_states_by_uuids is called with
compute_uuids being [] when the incoming provider_summaries is {}.
"""
with mock.patch.object(self.driver.host_manager,
'get_host_states_by_uuids') as get_host_states:
self.driver._get_all_host_states(
mock.sentinel.ctxt, mock.sentinel.spec_obj, {})
# Make sure get_host_states_by_uuids was called with
# compute_uuids being [].
get_host_states.assert_called_once_with(
mock.sentinel.ctxt, [], mock.sentinel.spec_obj)
__problem_title__ = "Sliders"
__problem_url__ = "https://projecteuler.net/problem=244"
__problem_description__ = "You probably know the game . Here, instead of numbered tiles, we have " \
"seven red tiles and eight blue tiles. A move is denoted by the " \
"uppercase initial of the direction (Left, Right, Up, Down) in which " \
"the tile is slid, e.g. starting from configuration ( ), by the " \
"sequence we reach the configuration ( ): For each path, its checksum " \
"is calculated by (pseudocode): For the sequence given above, the " \
"checksum would be 19761398. Now, starting from configuration ( ), " \
"find all shortest ways to reach configuration ( ). What is the sum of " \
"all checksums for the paths having the minimal length?"
import timeit


class Solution:
    @staticmethod
    def solution1():
        # TODO: solution not implemented yet.
        pass

    @staticmethod
    def time_solutions():
        setup = 'from __main__ import Solution'
        print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))


if __name__ == '__main__':
    s = Solution()
    print(s.solution1())
    s.time_solutions()